xref: /freebsd/sys/netinet/sctputil.c (revision 652a9748855320619e075c4e83aef2f5294412d2)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #include <netinet6/sctp6_var.h>
45 #endif
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_output.h>
48 #include <netinet/sctp_uio.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_auth.h>
52 #include <netinet/sctp_asconf.h>
53 #include <netinet/sctp_bsd_addr.h>
54 #include <netinet/sctp_kdtrace.h>
55 #if defined(INET6) || defined(INET)
56 #include <netinet/tcp_var.h>
57 #endif
58 #include <netinet/udp.h>
59 #include <netinet/udp_var.h>
60 #include <sys/proc.h>
61 #ifdef INET6
62 #include <netinet/icmp6.h>
63 #endif
64 
65 
66 #ifndef KTR_SCTP
67 #define KTR_SCTP KTR_SUBSYS
68 #endif
69 
70 extern const struct sctp_cc_functions sctp_cc_functions[];
71 extern const struct sctp_ss_functions sctp_ss_functions[];
72 
73 void
74 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
75 {
76 #if defined(SCTP_LOCAL_TRACE_BUF)
77 	struct sctp_cwnd_log sctp_clog;
78 
79 	sctp_clog.x.sb.stcb = stcb;
80 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
81 	if (stcb)
82 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
83 	else
84 		sctp_clog.x.sb.stcb_sbcc = 0;
85 	sctp_clog.x.sb.incr = incr;
86 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
87 	    SCTP_LOG_EVENT_SB,
88 	    from,
89 	    sctp_clog.x.misc.log1,
90 	    sctp_clog.x.misc.log2,
91 	    sctp_clog.x.misc.log3,
92 	    sctp_clog.x.misc.log4);
93 #endif
94 }
95 
96 void
97 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
98 {
99 #if defined(SCTP_LOCAL_TRACE_BUF)
100 	struct sctp_cwnd_log sctp_clog;
101 
102 	sctp_clog.x.close.inp = (void *)inp;
103 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
104 	if (stcb) {
105 		sctp_clog.x.close.stcb = (void *)stcb;
106 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
107 	} else {
108 		sctp_clog.x.close.stcb = 0;
109 		sctp_clog.x.close.state = 0;
110 	}
111 	sctp_clog.x.close.loc = loc;
112 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
113 	    SCTP_LOG_EVENT_CLOSE,
114 	    0,
115 	    sctp_clog.x.misc.log1,
116 	    sctp_clog.x.misc.log2,
117 	    sctp_clog.x.misc.log3,
118 	    sctp_clog.x.misc.log4);
119 #endif
120 }
121 
122 void
123 rto_logging(struct sctp_nets *net, int from)
124 {
125 #if defined(SCTP_LOCAL_TRACE_BUF)
126 	struct sctp_cwnd_log sctp_clog;
127 
128 	memset(&sctp_clog, 0, sizeof(sctp_clog));
129 	sctp_clog.x.rto.net = (void *)net;
130 	sctp_clog.x.rto.rtt = net->rtt / 1000;
131 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
132 	    SCTP_LOG_EVENT_RTT,
133 	    from,
134 	    sctp_clog.x.misc.log1,
135 	    sctp_clog.x.misc.log2,
136 	    sctp_clog.x.misc.log3,
137 	    sctp_clog.x.misc.log4);
138 #endif
139 }
140 
141 void
142 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
143 {
144 #if defined(SCTP_LOCAL_TRACE_BUF)
145 	struct sctp_cwnd_log sctp_clog;
146 
147 	sctp_clog.x.strlog.stcb = stcb;
148 	sctp_clog.x.strlog.n_tsn = tsn;
149 	sctp_clog.x.strlog.n_sseq = sseq;
150 	sctp_clog.x.strlog.e_tsn = 0;
151 	sctp_clog.x.strlog.e_sseq = 0;
152 	sctp_clog.x.strlog.strm = stream;
153 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
154 	    SCTP_LOG_EVENT_STRM,
155 	    from,
156 	    sctp_clog.x.misc.log1,
157 	    sctp_clog.x.misc.log2,
158 	    sctp_clog.x.misc.log3,
159 	    sctp_clog.x.misc.log4);
160 #endif
161 }
162 
163 void
164 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
165 {
166 #if defined(SCTP_LOCAL_TRACE_BUF)
167 	struct sctp_cwnd_log sctp_clog;
168 
169 	sctp_clog.x.nagle.stcb = (void *)stcb;
170 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
171 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
172 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
173 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
174 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
175 	    SCTP_LOG_EVENT_NAGLE,
176 	    action,
177 	    sctp_clog.x.misc.log1,
178 	    sctp_clog.x.misc.log2,
179 	    sctp_clog.x.misc.log3,
180 	    sctp_clog.x.misc.log4);
181 #endif
182 }
183 
184 void
185 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
186 {
187 #if defined(SCTP_LOCAL_TRACE_BUF)
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	sctp_clog.x.sack.cumack = cumack;
191 	sctp_clog.x.sack.oldcumack = old_cumack;
192 	sctp_clog.x.sack.tsn = tsn;
193 	sctp_clog.x.sack.numGaps = gaps;
194 	sctp_clog.x.sack.numDups = dups;
195 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
196 	    SCTP_LOG_EVENT_SACK,
197 	    from,
198 	    sctp_clog.x.misc.log1,
199 	    sctp_clog.x.misc.log2,
200 	    sctp_clog.x.misc.log3,
201 	    sctp_clog.x.misc.log4);
202 #endif
203 }
204 
205 void
206 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
207 {
208 #if defined(SCTP_LOCAL_TRACE_BUF)
209 	struct sctp_cwnd_log sctp_clog;
210 
211 	memset(&sctp_clog, 0, sizeof(sctp_clog));
212 	sctp_clog.x.map.base = map;
213 	sctp_clog.x.map.cum = cum;
214 	sctp_clog.x.map.high = high;
215 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
216 	    SCTP_LOG_EVENT_MAP,
217 	    from,
218 	    sctp_clog.x.misc.log1,
219 	    sctp_clog.x.misc.log2,
220 	    sctp_clog.x.misc.log3,
221 	    sctp_clog.x.misc.log4);
222 #endif
223 }
224 
225 void
226 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
227 {
228 #if defined(SCTP_LOCAL_TRACE_BUF)
229 	struct sctp_cwnd_log sctp_clog;
230 
231 	memset(&sctp_clog, 0, sizeof(sctp_clog));
232 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
233 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
234 	sctp_clog.x.fr.tsn = tsn;
235 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
236 	    SCTP_LOG_EVENT_FR,
237 	    from,
238 	    sctp_clog.x.misc.log1,
239 	    sctp_clog.x.misc.log2,
240 	    sctp_clog.x.misc.log3,
241 	    sctp_clog.x.misc.log4);
242 #endif
243 }
244 
245 #ifdef SCTP_MBUF_LOGGING
246 void
247 sctp_log_mb(struct mbuf *m, int from)
248 {
249 #if defined(SCTP_LOCAL_TRACE_BUF)
250 	struct sctp_cwnd_log sctp_clog;
251 
252 	sctp_clog.x.mb.mp = m;
253 	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
254 	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
255 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
256 	if (SCTP_BUF_IS_EXTENDED(m)) {
257 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
258 		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
259 	} else {
260 		sctp_clog.x.mb.ext = 0;
261 		sctp_clog.x.mb.refcnt = 0;
262 	}
263 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
264 	    SCTP_LOG_EVENT_MBUF,
265 	    from,
266 	    sctp_clog.x.misc.log1,
267 	    sctp_clog.x.misc.log2,
268 	    sctp_clog.x.misc.log3,
269 	    sctp_clog.x.misc.log4);
270 #endif
271 }
272 
273 void
274 sctp_log_mbc(struct mbuf *m, int from)
275 {
276 	struct mbuf *mat;
277 
278 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
279 		sctp_log_mb(mat, from);
280 	}
281 }
282 #endif
283 
284 void
285 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
286 {
287 #if defined(SCTP_LOCAL_TRACE_BUF)
288 	struct sctp_cwnd_log sctp_clog;
289 
290 	if (control == NULL) {
291 		SCTP_PRINTF("Gak log of NULL?\n");
292 		return;
293 	}
294 	sctp_clog.x.strlog.stcb = control->stcb;
295 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
296 	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
297 	sctp_clog.x.strlog.strm = control->sinfo_stream;
298 	if (poschk != NULL) {
299 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
300 		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
301 	} else {
302 		sctp_clog.x.strlog.e_tsn = 0;
303 		sctp_clog.x.strlog.e_sseq = 0;
304 	}
305 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
306 	    SCTP_LOG_EVENT_STRM,
307 	    from,
308 	    sctp_clog.x.misc.log1,
309 	    sctp_clog.x.misc.log2,
310 	    sctp_clog.x.misc.log3,
311 	    sctp_clog.x.misc.log4);
312 #endif
313 }
314 
315 void
316 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
317 {
318 #if defined(SCTP_LOCAL_TRACE_BUF)
319 	struct sctp_cwnd_log sctp_clog;
320 
321 	sctp_clog.x.cwnd.net = net;
322 	if (stcb->asoc.send_queue_cnt > 255)
323 		sctp_clog.x.cwnd.cnt_in_send = 255;
324 	else
325 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
326 	if (stcb->asoc.stream_queue_cnt > 255)
327 		sctp_clog.x.cwnd.cnt_in_str = 255;
328 	else
329 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
330 
331 	if (net) {
332 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
333 		sctp_clog.x.cwnd.inflight = net->flight_size;
334 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
335 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
336 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
337 	}
338 	if (SCTP_CWNDLOG_PRESEND == from) {
339 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
340 	}
341 	sctp_clog.x.cwnd.cwnd_augment = augment;
342 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
343 	    SCTP_LOG_EVENT_CWND,
344 	    from,
345 	    sctp_clog.x.misc.log1,
346 	    sctp_clog.x.misc.log2,
347 	    sctp_clog.x.misc.log3,
348 	    sctp_clog.x.misc.log4);
349 #endif
350 }
351 
352 void
353 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
354 {
355 #if defined(SCTP_LOCAL_TRACE_BUF)
356 	struct sctp_cwnd_log sctp_clog;
357 
358 	memset(&sctp_clog, 0, sizeof(sctp_clog));
359 	if (inp) {
360 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
361 
362 	} else {
363 		sctp_clog.x.lock.sock = (void *)NULL;
364 	}
365 	sctp_clog.x.lock.inp = (void *)inp;
366 	if (stcb) {
367 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
368 	} else {
369 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
370 	}
371 	if (inp) {
372 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
373 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
374 	} else {
375 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
376 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
377 	}
378 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
379 	if (inp && (inp->sctp_socket)) {
380 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
381 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
382 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
383 	} else {
384 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
385 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
386 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
387 	}
388 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
389 	    SCTP_LOG_LOCK_EVENT,
390 	    from,
391 	    sctp_clog.x.misc.log1,
392 	    sctp_clog.x.misc.log2,
393 	    sctp_clog.x.misc.log3,
394 	    sctp_clog.x.misc.log4);
395 #endif
396 }
397 
398 void
399 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
400 {
401 #if defined(SCTP_LOCAL_TRACE_BUF)
402 	struct sctp_cwnd_log sctp_clog;
403 
404 	memset(&sctp_clog, 0, sizeof(sctp_clog));
405 	sctp_clog.x.cwnd.net = net;
406 	sctp_clog.x.cwnd.cwnd_new_value = error;
407 	sctp_clog.x.cwnd.inflight = net->flight_size;
408 	sctp_clog.x.cwnd.cwnd_augment = burst;
409 	if (stcb->asoc.send_queue_cnt > 255)
410 		sctp_clog.x.cwnd.cnt_in_send = 255;
411 	else
412 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
413 	if (stcb->asoc.stream_queue_cnt > 255)
414 		sctp_clog.x.cwnd.cnt_in_str = 255;
415 	else
416 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
417 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
418 	    SCTP_LOG_EVENT_MAXBURST,
419 	    from,
420 	    sctp_clog.x.misc.log1,
421 	    sctp_clog.x.misc.log2,
422 	    sctp_clog.x.misc.log3,
423 	    sctp_clog.x.misc.log4);
424 #endif
425 }
426 
427 void
428 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
429 {
430 #if defined(SCTP_LOCAL_TRACE_BUF)
431 	struct sctp_cwnd_log sctp_clog;
432 
433 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
434 	sctp_clog.x.rwnd.send_size = snd_size;
435 	sctp_clog.x.rwnd.overhead = overhead;
436 	sctp_clog.x.rwnd.new_rwnd = 0;
437 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
438 	    SCTP_LOG_EVENT_RWND,
439 	    from,
440 	    sctp_clog.x.misc.log1,
441 	    sctp_clog.x.misc.log2,
442 	    sctp_clog.x.misc.log3,
443 	    sctp_clog.x.misc.log4);
444 #endif
445 }
446 
447 void
448 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
449 {
450 #if defined(SCTP_LOCAL_TRACE_BUF)
451 	struct sctp_cwnd_log sctp_clog;
452 
453 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
454 	sctp_clog.x.rwnd.send_size = flight_size;
455 	sctp_clog.x.rwnd.overhead = overhead;
456 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
457 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
458 	    SCTP_LOG_EVENT_RWND,
459 	    from,
460 	    sctp_clog.x.misc.log1,
461 	    sctp_clog.x.misc.log2,
462 	    sctp_clog.x.misc.log3,
463 	    sctp_clog.x.misc.log4);
464 #endif
465 }
466 
467 #ifdef SCTP_MBCNT_LOGGING
468 static void
469 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
470 {
471 #if defined(SCTP_LOCAL_TRACE_BUF)
472 	struct sctp_cwnd_log sctp_clog;
473 
474 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
475 	sctp_clog.x.mbcnt.size_change = book;
476 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
477 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
478 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
479 	    SCTP_LOG_EVENT_MBCNT,
480 	    from,
481 	    sctp_clog.x.misc.log1,
482 	    sctp_clog.x.misc.log2,
483 	    sctp_clog.x.misc.log3,
484 	    sctp_clog.x.misc.log4);
485 #endif
486 }
487 #endif
488 
489 void
490 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
491 {
492 #if defined(SCTP_LOCAL_TRACE_BUF)
493 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
494 	    SCTP_LOG_MISC_EVENT,
495 	    from,
496 	    a, b, c, d);
497 #endif
498 }
499 
500 void
501 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
502 {
503 #if defined(SCTP_LOCAL_TRACE_BUF)
504 	struct sctp_cwnd_log sctp_clog;
505 
506 	sctp_clog.x.wake.stcb = (void *)stcb;
507 	sctp_clog.x.wake.wake_cnt = wake_cnt;
508 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
509 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
510 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
511 
512 	if (stcb->asoc.stream_queue_cnt < 0xff)
513 		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
514 	else
515 		sctp_clog.x.wake.stream_qcnt = 0xff;
516 
517 	if (stcb->asoc.chunks_on_out_queue < 0xff)
518 		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
519 	else
520 		sctp_clog.x.wake.chunks_on_oque = 0xff;
521 
522 	sctp_clog.x.wake.sctpflags = 0;
523 	/* set in the defered mode stuff */
524 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
525 		sctp_clog.x.wake.sctpflags |= 1;
526 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
527 		sctp_clog.x.wake.sctpflags |= 2;
528 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
529 		sctp_clog.x.wake.sctpflags |= 4;
530 	/* what about the sb */
531 	if (stcb->sctp_socket) {
532 		struct socket *so = stcb->sctp_socket;
533 
534 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
535 	} else {
536 		sctp_clog.x.wake.sbflags = 0xff;
537 	}
538 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
539 	    SCTP_LOG_EVENT_WAKE,
540 	    from,
541 	    sctp_clog.x.misc.log1,
542 	    sctp_clog.x.misc.log2,
543 	    sctp_clog.x.misc.log3,
544 	    sctp_clog.x.misc.log4);
545 #endif
546 }
547 
548 void
549 sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
550 {
551 #if defined(SCTP_LOCAL_TRACE_BUF)
552 	struct sctp_cwnd_log sctp_clog;
553 
554 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
555 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
556 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
557 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
558 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
559 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
560 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
561 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
562 	    SCTP_LOG_EVENT_BLOCK,
563 	    from,
564 	    sctp_clog.x.misc.log1,
565 	    sctp_clog.x.misc.log2,
566 	    sctp_clog.x.misc.log3,
567 	    sctp_clog.x.misc.log4);
568 #endif
569 }
570 
571 int
572 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
573 {
574 	/* May need to fix this if ktrdump does not work */
575 	return (0);
576 }
577 
578 #ifdef SCTP_AUDITING_ENABLED
579 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
580 static int sctp_audit_indx = 0;
581 
582 static
583 void
584 sctp_print_audit_report(void)
585 {
586 	int i;
587 	int cnt;
588 
589 	cnt = 0;
590 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
591 		if ((sctp_audit_data[i][0] == 0xe0) &&
592 		    (sctp_audit_data[i][1] == 0x01)) {
593 			cnt = 0;
594 			SCTP_PRINTF("\n");
595 		} else if (sctp_audit_data[i][0] == 0xf0) {
596 			cnt = 0;
597 			SCTP_PRINTF("\n");
598 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
599 		    (sctp_audit_data[i][1] == 0x01)) {
600 			SCTP_PRINTF("\n");
601 			cnt = 0;
602 		}
603 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
604 		    (uint32_t)sctp_audit_data[i][1]);
605 		cnt++;
606 		if ((cnt % 14) == 0)
607 			SCTP_PRINTF("\n");
608 	}
609 	for (i = 0; i < sctp_audit_indx; i++) {
610 		if ((sctp_audit_data[i][0] == 0xe0) &&
611 		    (sctp_audit_data[i][1] == 0x01)) {
612 			cnt = 0;
613 			SCTP_PRINTF("\n");
614 		} else if (sctp_audit_data[i][0] == 0xf0) {
615 			cnt = 0;
616 			SCTP_PRINTF("\n");
617 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
618 		    (sctp_audit_data[i][1] == 0x01)) {
619 			SCTP_PRINTF("\n");
620 			cnt = 0;
621 		}
622 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
623 		    (uint32_t)sctp_audit_data[i][1]);
624 		cnt++;
625 		if ((cnt % 14) == 0)
626 			SCTP_PRINTF("\n");
627 	}
628 	SCTP_PRINTF("\n");
629 }
630 
631 void
632 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
633     struct sctp_nets *net)
634 {
635 	int resend_cnt, tot_out, rep, tot_book_cnt;
636 	struct sctp_nets *lnet;
637 	struct sctp_tmit_chunk *chk;
638 
639 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
640 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
641 	sctp_audit_indx++;
642 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
643 		sctp_audit_indx = 0;
644 	}
645 	if (inp == NULL) {
646 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
647 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
648 		sctp_audit_indx++;
649 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
650 			sctp_audit_indx = 0;
651 		}
652 		return;
653 	}
654 	if (stcb == NULL) {
655 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
656 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
657 		sctp_audit_indx++;
658 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
659 			sctp_audit_indx = 0;
660 		}
661 		return;
662 	}
663 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
664 	sctp_audit_data[sctp_audit_indx][1] =
665 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
666 	sctp_audit_indx++;
667 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
668 		sctp_audit_indx = 0;
669 	}
670 	rep = 0;
671 	tot_book_cnt = 0;
672 	resend_cnt = tot_out = 0;
673 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
674 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
675 			resend_cnt++;
676 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
677 			tot_out += chk->book_size;
678 			tot_book_cnt++;
679 		}
680 	}
681 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
682 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
683 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
684 		sctp_audit_indx++;
685 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
686 			sctp_audit_indx = 0;
687 		}
688 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
689 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
690 		rep = 1;
691 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
692 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
693 		sctp_audit_data[sctp_audit_indx][1] =
694 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
695 		sctp_audit_indx++;
696 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
697 			sctp_audit_indx = 0;
698 		}
699 	}
700 	if (tot_out != stcb->asoc.total_flight) {
701 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
702 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
703 		sctp_audit_indx++;
704 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
705 			sctp_audit_indx = 0;
706 		}
707 		rep = 1;
708 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
709 		    (int)stcb->asoc.total_flight);
710 		stcb->asoc.total_flight = tot_out;
711 	}
712 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
713 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
714 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
715 		sctp_audit_indx++;
716 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
717 			sctp_audit_indx = 0;
718 		}
719 		rep = 1;
720 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
721 
722 		stcb->asoc.total_flight_count = tot_book_cnt;
723 	}
724 	tot_out = 0;
725 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
726 		tot_out += lnet->flight_size;
727 	}
728 	if (tot_out != stcb->asoc.total_flight) {
729 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
730 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
731 		sctp_audit_indx++;
732 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
733 			sctp_audit_indx = 0;
734 		}
735 		rep = 1;
736 		SCTP_PRINTF("real flight:%d net total was %d\n",
737 		    stcb->asoc.total_flight, tot_out);
738 		/* now corrective action */
739 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
740 
741 			tot_out = 0;
742 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
743 				if ((chk->whoTo == lnet) &&
744 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
745 					tot_out += chk->book_size;
746 				}
747 			}
748 			if (lnet->flight_size != tot_out) {
749 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
750 				    (void *)lnet, lnet->flight_size,
751 				    tot_out);
752 				lnet->flight_size = tot_out;
753 			}
754 		}
755 	}
756 	if (rep) {
757 		sctp_print_audit_report();
758 	}
759 }
760 
761 void
762 sctp_audit_log(uint8_t ev, uint8_t fd)
763 {
764 
765 	sctp_audit_data[sctp_audit_indx][0] = ev;
766 	sctp_audit_data[sctp_audit_indx][1] = fd;
767 	sctp_audit_indx++;
768 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
769 		sctp_audit_indx = 0;
770 	}
771 }
772 
773 #endif
774 
775 /*
776  * The conversion from time to ticks and vice versa is done by rounding
777  * upwards. This way we can test in the code the time to be positive and
778  * know that this corresponds to a positive number of ticks.
779  */
780 
781 uint32_t
782 sctp_msecs_to_ticks(uint32_t msecs)
783 {
784 	uint64_t temp;
785 	uint32_t ticks;
786 
787 	if (hz == 1000) {
788 		ticks = msecs;
789 	} else {
790 		temp = (((uint64_t)msecs * hz) + 999) / 1000;
791 		if (temp > UINT32_MAX) {
792 			ticks = UINT32_MAX;
793 		} else {
794 			ticks = (uint32_t)temp;
795 		}
796 	}
797 	return (ticks);
798 }
799 
800 uint32_t
801 sctp_ticks_to_msecs(uint32_t ticks)
802 {
803 	uint64_t temp;
804 	uint32_t msecs;
805 
806 	if (hz == 1000) {
807 		msecs = ticks;
808 	} else {
809 		temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
810 		if (temp > UINT32_MAX) {
811 			msecs = UINT32_MAX;
812 		} else {
813 			msecs = (uint32_t)temp;
814 		}
815 	}
816 	return (msecs);
817 }
818 
819 uint32_t
820 sctp_secs_to_ticks(uint32_t secs)
821 {
822 	uint64_t temp;
823 	uint32_t ticks;
824 
825 	temp = (uint64_t)secs * hz;
826 	if (temp > UINT32_MAX) {
827 		ticks = UINT32_MAX;
828 	} else {
829 		ticks = (uint32_t)temp;
830 	}
831 	return (ticks);
832 }
833 
834 uint32_t
835 sctp_ticks_to_secs(uint32_t ticks)
836 {
837 	uint64_t temp;
838 	uint32_t secs;
839 
840 	temp = ((uint64_t)ticks + (hz - 1)) / hz;
841 	if (temp > UINT32_MAX) {
842 		secs = UINT32_MAX;
843 	} else {
844 		secs = (uint32_t)temp;
845 	}
846 	return (secs);
847 }
848 
849 /*
850  * sctp_stop_timers_for_shutdown() should be called
851  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
852  * state to make sure that all timers are stopped.
853  */
854 void
855 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
856 {
857 	struct sctp_inpcb *inp;
858 	struct sctp_nets *net;
859 
860 	inp = stcb->sctp_ep;
861 
862 	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
863 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
864 	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
865 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
866 	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
867 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
868 	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
869 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
870 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
871 		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
872 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
873 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
874 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
875 	}
876 }
877 
878 void
879 sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
880 {
881 	struct sctp_inpcb *inp;
882 	struct sctp_nets *net;
883 
884 	inp = stcb->sctp_ep;
885 	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
886 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
887 	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
888 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
889 	if (stop_assoc_kill_timer) {
890 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
891 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
892 	}
893 	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
894 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
895 	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
896 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
897 	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
898 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
899 	/* Mobility adaptation */
900 	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
901 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
902 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
903 		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
904 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
905 		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
906 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
907 		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
908 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
909 		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
910 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
911 		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
912 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
913 		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
914 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
915 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
916 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
917 	}
918 }
919 
920 /*
921  * A list of sizes based on typical mtu's, used only if next hop size not
922  * returned. These values MUST be multiples of 4 and MUST be ordered.
923  */
924 static uint32_t sctp_mtu_sizes[] = {
925 	68,
926 	296,
927 	508,
928 	512,
929 	544,
930 	576,
931 	1004,
932 	1492,
933 	1500,
934 	1536,
935 	2000,
936 	2048,
937 	4352,
938 	4464,
939 	8168,
940 	17912,
941 	32000,
942 	65532
943 };
944 
945 /*
946  * Return the largest MTU in sctp_mtu_sizes smaller than val.
947  * If val is smaller than the minimum, just return the largest
948  * multiple of 4 smaller or equal to val.
949  * Ensure that the result is a multiple of 4.
950  */
951 uint32_t
952 sctp_get_prev_mtu(uint32_t val)
953 {
954 	uint32_t i;
955 
956 	val &= 0xfffffffc;
957 	if (val <= sctp_mtu_sizes[0]) {
958 		return (val);
959 	}
960 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
961 		if (val <= sctp_mtu_sizes[i]) {
962 			break;
963 		}
964 	}
965 	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
966 	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
967 	return (sctp_mtu_sizes[i - 1]);
968 }
969 
970 /*
971  * Return the smallest MTU in sctp_mtu_sizes larger than val.
972  * If val is larger than the maximum, just return the largest multiple of 4 smaller
973  * or equal to val.
974  * Ensure that the result is a multiple of 4.
975  */
976 uint32_t
977 sctp_get_next_mtu(uint32_t val)
978 {
979 	/* select another MTU that is just bigger than this one */
980 	uint32_t i;
981 
982 	val &= 0xfffffffc;
983 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
984 		if (val < sctp_mtu_sizes[i]) {
985 			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
986 			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
987 			return (sctp_mtu_sizes[i]);
988 		}
989 	}
990 	return (val);
991 }
992 
993 void
994 sctp_fill_random_store(struct sctp_pcb *m)
995 {
996 	/*
997 	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
998 	 * our counter. The result becomes our good random numbers and we
999 	 * then setup to give these out. Note that we do no locking to
1000 	 * protect this. This is ok, since if competing folks call this we
1001 	 * will get more gobbled gook in the random store which is what we
1002 	 * want. There is a danger that two guys will use the same random
1003 	 * numbers, but thats ok too since that is random as well :->
1004 	 */
1005 	m->store_at = 0;
1006 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
1007 	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
1008 	    sizeof(m->random_counter), (uint8_t *)m->random_store);
1009 	m->random_counter++;
1010 }
1011 
1012 uint32_t
1013 sctp_select_initial_TSN(struct sctp_pcb *inp)
1014 {
1015 	/*
1016 	 * A true implementation should use random selection process to get
1017 	 * the initial stream sequence number, using RFC1750 as a good
1018 	 * guideline
1019 	 */
1020 	uint32_t x, *xp;
1021 	uint8_t *p;
1022 	int store_at, new_store;
1023 
1024 	if (inp->initial_sequence_debug != 0) {
1025 		uint32_t ret;
1026 
1027 		ret = inp->initial_sequence_debug;
1028 		inp->initial_sequence_debug++;
1029 		return (ret);
1030 	}
1031 retry:
1032 	store_at = inp->store_at;
1033 	new_store = store_at + sizeof(uint32_t);
1034 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
1035 		new_store = 0;
1036 	}
1037 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
1038 		goto retry;
1039 	}
1040 	if (new_store == 0) {
1041 		/* Refill the random store */
1042 		sctp_fill_random_store(inp);
1043 	}
1044 	p = &inp->random_store[store_at];
1045 	xp = (uint32_t *)p;
1046 	x = *xp;
1047 	return (x);
1048 }
1049 
1050 uint32_t
1051 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
1052 {
1053 	uint32_t x;
1054 	struct timeval now;
1055 
1056 	if (check) {
1057 		(void)SCTP_GETTIME_TIMEVAL(&now);
1058 	}
1059 	for (;;) {
1060 		x = sctp_select_initial_TSN(&inp->sctp_ep);
1061 		if (x == 0) {
1062 			/* we never use 0 */
1063 			continue;
1064 		}
1065 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
1066 			break;
1067 		}
1068 	}
1069 	return (x);
1070 }
1071 
1072 int32_t
1073 sctp_map_assoc_state(int kernel_state)
1074 {
1075 	int32_t user_state;
1076 
1077 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
1078 		user_state = SCTP_CLOSED;
1079 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
1080 		user_state = SCTP_SHUTDOWN_PENDING;
1081 	} else {
1082 		switch (kernel_state & SCTP_STATE_MASK) {
1083 		case SCTP_STATE_EMPTY:
1084 			user_state = SCTP_CLOSED;
1085 			break;
1086 		case SCTP_STATE_INUSE:
1087 			user_state = SCTP_CLOSED;
1088 			break;
1089 		case SCTP_STATE_COOKIE_WAIT:
1090 			user_state = SCTP_COOKIE_WAIT;
1091 			break;
1092 		case SCTP_STATE_COOKIE_ECHOED:
1093 			user_state = SCTP_COOKIE_ECHOED;
1094 			break;
1095 		case SCTP_STATE_OPEN:
1096 			user_state = SCTP_ESTABLISHED;
1097 			break;
1098 		case SCTP_STATE_SHUTDOWN_SENT:
1099 			user_state = SCTP_SHUTDOWN_SENT;
1100 			break;
1101 		case SCTP_STATE_SHUTDOWN_RECEIVED:
1102 			user_state = SCTP_SHUTDOWN_RECEIVED;
1103 			break;
1104 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
1105 			user_state = SCTP_SHUTDOWN_ACK_SENT;
1106 			break;
1107 		default:
1108 			user_state = SCTP_CLOSED;
1109 			break;
1110 		}
1111 	}
1112 	return (user_state);
1113 }
1114 
1115 int
1116 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1117     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
1118 {
1119 	struct sctp_association *asoc;
1120 
1121 	/*
1122 	 * Anything set to zero is taken care of by the allocation routine's
1123 	 * bzero
1124 	 */
1125 
1126 	/*
1127 	 * Up front select what scoping to apply on addresses I tell my peer
1128 	 * Not sure what to do with these right now, we will need to come up
1129 	 * with a way to set them. We may need to pass them through from the
1130 	 * caller in the sctp_aloc_assoc() function.
1131 	 */
1132 	int i;
1133 #if defined(SCTP_DETAILED_STR_STATS)
1134 	int j;
1135 #endif
1136 
1137 	asoc = &stcb->asoc;
1138 	/* init all variables to a known value. */
1139 	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
1140 	asoc->max_burst = inp->sctp_ep.max_burst;
1141 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
1142 	asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
1143 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
1144 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
1145 	asoc->ecn_supported = inp->ecn_supported;
1146 	asoc->prsctp_supported = inp->prsctp_supported;
1147 	asoc->idata_supported = inp->idata_supported;
1148 	asoc->auth_supported = inp->auth_supported;
1149 	asoc->asconf_supported = inp->asconf_supported;
1150 	asoc->reconfig_supported = inp->reconfig_supported;
1151 	asoc->nrsack_supported = inp->nrsack_supported;
1152 	asoc->pktdrop_supported = inp->pktdrop_supported;
1153 	asoc->idata_supported = inp->idata_supported;
1154 	asoc->sctp_cmt_pf = (uint8_t)0;
1155 	asoc->sctp_frag_point = inp->sctp_frag_point;
1156 	asoc->sctp_features = inp->sctp_features;
1157 	asoc->default_dscp = inp->sctp_ep.default_dscp;
1158 	asoc->max_cwnd = inp->max_cwnd;
1159 #ifdef INET6
1160 	if (inp->sctp_ep.default_flowlabel) {
1161 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
1162 	} else {
1163 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
1164 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
1165 			asoc->default_flowlabel &= 0x000fffff;
1166 			asoc->default_flowlabel |= 0x80000000;
1167 		} else {
1168 			asoc->default_flowlabel = 0;
1169 		}
1170 	}
1171 #endif
1172 	asoc->sb_send_resv = 0;
1173 	if (override_tag) {
1174 		asoc->my_vtag = override_tag;
1175 	} else {
1176 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1177 	}
1178 	/* Get the nonce tags */
1179 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1180 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1181 	asoc->vrf_id = vrf_id;
1182 
1183 #ifdef SCTP_ASOCLOG_OF_TSNS
1184 	asoc->tsn_in_at = 0;
1185 	asoc->tsn_out_at = 0;
1186 	asoc->tsn_in_wrapped = 0;
1187 	asoc->tsn_out_wrapped = 0;
1188 	asoc->cumack_log_at = 0;
1189 	asoc->cumack_log_atsnt = 0;
1190 #endif
1191 #ifdef SCTP_FS_SPEC_LOG
1192 	asoc->fs_index = 0;
1193 #endif
1194 	asoc->refcnt = 0;
1195 	asoc->assoc_up_sent = 0;
1196 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1197 	    sctp_select_initial_TSN(&inp->sctp_ep);
1198 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1199 	/* we are optimisitic here */
1200 	asoc->peer_supports_nat = 0;
1201 	asoc->sent_queue_retran_cnt = 0;
1202 
1203 	/* for CMT */
1204 	asoc->last_net_cmt_send_started = NULL;
1205 
1206 	/* This will need to be adjusted */
1207 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1208 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1209 	asoc->asconf_seq_in = asoc->last_acked_seq;
1210 
1211 	/* here we are different, we hold the next one we expect */
1212 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1213 
1214 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1215 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1216 
1217 	asoc->default_mtu = inp->sctp_ep.default_mtu;
1218 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1219 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1220 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1221 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1222 	asoc->free_chunk_cnt = 0;
1223 
1224 	asoc->iam_blocking = 0;
1225 	asoc->context = inp->sctp_context;
1226 	asoc->local_strreset_support = inp->local_strreset_support;
1227 	asoc->def_send = inp->def_send;
1228 	asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1229 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1230 	asoc->pr_sctp_cnt = 0;
1231 	asoc->total_output_queue_size = 0;
1232 
1233 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1234 		asoc->scope.ipv6_addr_legal = 1;
1235 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1236 			asoc->scope.ipv4_addr_legal = 1;
1237 		} else {
1238 			asoc->scope.ipv4_addr_legal = 0;
1239 		}
1240 	} else {
1241 		asoc->scope.ipv6_addr_legal = 0;
1242 		asoc->scope.ipv4_addr_legal = 1;
1243 	}
1244 
1245 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1246 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1247 
1248 	asoc->smallest_mtu = inp->sctp_frag_point;
1249 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1250 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1251 
1252 	asoc->stream_locked_on = 0;
1253 	asoc->ecn_echo_cnt_onq = 0;
1254 	asoc->stream_locked = 0;
1255 
1256 	asoc->send_sack = 1;
1257 
1258 	LIST_INIT(&asoc->sctp_restricted_addrs);
1259 
1260 	TAILQ_INIT(&asoc->nets);
1261 	TAILQ_INIT(&asoc->pending_reply_queue);
1262 	TAILQ_INIT(&asoc->asconf_ack_sent);
1263 	/* Setup to fill the hb random cache at first HB */
1264 	asoc->hb_random_idx = 4;
1265 
1266 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1267 
1268 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1269 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1270 
1271 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1272 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1273 
1274 	/*
1275 	 * Now the stream parameters, here we allocate space for all streams
1276 	 * that we request by default.
1277 	 */
1278 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1279 	    o_strms;
1280 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1281 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1282 	    SCTP_M_STRMO);
1283 	if (asoc->strmout == NULL) {
1284 		/* big trouble no memory */
1285 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1286 		return (ENOMEM);
1287 	}
1288 	for (i = 0; i < asoc->streamoutcnt; i++) {
1289 		/*
1290 		 * inbound side must be set to 0xffff, also NOTE when we get
1291 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1292 		 * count (streamoutcnt) but first check if we sent to any of
1293 		 * the upper streams that were dropped (if some were). Those
1294 		 * that were dropped must be notified to the upper layer as
1295 		 * failed to send.
1296 		 */
1297 		asoc->strmout[i].next_mid_ordered = 0;
1298 		asoc->strmout[i].next_mid_unordered = 0;
1299 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1300 		asoc->strmout[i].chunks_on_queues = 0;
1301 #if defined(SCTP_DETAILED_STR_STATS)
1302 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1303 			asoc->strmout[i].abandoned_sent[j] = 0;
1304 			asoc->strmout[i].abandoned_unsent[j] = 0;
1305 		}
1306 #else
1307 		asoc->strmout[i].abandoned_sent[0] = 0;
1308 		asoc->strmout[i].abandoned_unsent[0] = 0;
1309 #endif
1310 		asoc->strmout[i].sid = i;
1311 		asoc->strmout[i].last_msg_incomplete = 0;
1312 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1313 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1314 	}
1315 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1316 
1317 	/* Now the mapping array */
1318 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1319 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1320 	    SCTP_M_MAP);
1321 	if (asoc->mapping_array == NULL) {
1322 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1323 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1324 		return (ENOMEM);
1325 	}
1326 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1327 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1328 	    SCTP_M_MAP);
1329 	if (asoc->nr_mapping_array == NULL) {
1330 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1331 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1332 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1333 		return (ENOMEM);
1334 	}
1335 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1336 
1337 	/* Now the init of the other outqueues */
1338 	TAILQ_INIT(&asoc->free_chunks);
1339 	TAILQ_INIT(&asoc->control_send_queue);
1340 	TAILQ_INIT(&asoc->asconf_send_queue);
1341 	TAILQ_INIT(&asoc->send_queue);
1342 	TAILQ_INIT(&asoc->sent_queue);
1343 	TAILQ_INIT(&asoc->resetHead);
1344 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1345 	TAILQ_INIT(&asoc->asconf_queue);
1346 	/* authentication fields */
1347 	asoc->authinfo.random = NULL;
1348 	asoc->authinfo.active_keyid = 0;
1349 	asoc->authinfo.assoc_key = NULL;
1350 	asoc->authinfo.assoc_keyid = 0;
1351 	asoc->authinfo.recv_key = NULL;
1352 	asoc->authinfo.recv_keyid = 0;
1353 	LIST_INIT(&asoc->shared_keys);
1354 	asoc->marked_retrans = 0;
1355 	asoc->port = inp->sctp_ep.port;
1356 	asoc->timoinit = 0;
1357 	asoc->timodata = 0;
1358 	asoc->timosack = 0;
1359 	asoc->timoshutdown = 0;
1360 	asoc->timoheartbeat = 0;
1361 	asoc->timocookie = 0;
1362 	asoc->timoshutdownack = 0;
1363 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1364 	asoc->discontinuity_time = asoc->start_time;
1365 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1366 		asoc->abandoned_unsent[i] = 0;
1367 		asoc->abandoned_sent[i] = 0;
1368 	}
1369 	/*
1370 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1371 	 * freed later when the association is freed.
1372 	 */
1373 	return (0);
1374 }
1375 
1376 void
1377 sctp_print_mapping_array(struct sctp_association *asoc)
1378 {
1379 	unsigned int i, limit;
1380 
1381 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1382 	    asoc->mapping_array_size,
1383 	    asoc->mapping_array_base_tsn,
1384 	    asoc->cumulative_tsn,
1385 	    asoc->highest_tsn_inside_map,
1386 	    asoc->highest_tsn_inside_nr_map);
1387 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1388 		if (asoc->mapping_array[limit - 1] != 0) {
1389 			break;
1390 		}
1391 	}
1392 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1393 	for (i = 0; i < limit; i++) {
1394 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1395 	}
1396 	if (limit % 16)
1397 		SCTP_PRINTF("\n");
1398 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1399 		if (asoc->nr_mapping_array[limit - 1]) {
1400 			break;
1401 		}
1402 	}
1403 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1404 	for (i = 0; i < limit; i++) {
1405 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1406 	}
1407 	if (limit % 16)
1408 		SCTP_PRINTF("\n");
1409 }
1410 
1411 int
1412 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1413 {
1414 	/* mapping array needs to grow */
1415 	uint8_t *new_array1, *new_array2;
1416 	uint32_t new_size;
1417 
1418 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1419 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1420 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1421 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1422 		/* can't get more, forget it */
1423 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1424 		if (new_array1) {
1425 			SCTP_FREE(new_array1, SCTP_M_MAP);
1426 		}
1427 		if (new_array2) {
1428 			SCTP_FREE(new_array2, SCTP_M_MAP);
1429 		}
1430 		return (-1);
1431 	}
1432 	memset(new_array1, 0, new_size);
1433 	memset(new_array2, 0, new_size);
1434 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1435 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1436 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1437 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1438 	asoc->mapping_array = new_array1;
1439 	asoc->nr_mapping_array = new_array2;
1440 	asoc->mapping_array_size = new_size;
1441 	return (0);
1442 }
1443 
1444 
1445 static void
1446 sctp_iterator_work(struct sctp_iterator *it)
1447 {
1448 	struct epoch_tracker et;
1449 	struct sctp_inpcb *tinp;
1450 	int iteration_count = 0;
1451 	int inp_skip = 0;
1452 	int first_in = 1;
1453 
1454 	NET_EPOCH_ENTER(et);
1455 	SCTP_INP_INFO_RLOCK();
1456 	SCTP_ITERATOR_LOCK();
1457 	sctp_it_ctl.cur_it = it;
1458 	if (it->inp) {
1459 		SCTP_INP_RLOCK(it->inp);
1460 		SCTP_INP_DECR_REF(it->inp);
1461 	}
1462 	if (it->inp == NULL) {
1463 		/* iterator is complete */
1464 done_with_iterator:
1465 		sctp_it_ctl.cur_it = NULL;
1466 		SCTP_ITERATOR_UNLOCK();
1467 		SCTP_INP_INFO_RUNLOCK();
1468 		if (it->function_atend != NULL) {
1469 			(*it->function_atend) (it->pointer, it->val);
1470 		}
1471 		SCTP_FREE(it, SCTP_M_ITER);
1472 		NET_EPOCH_EXIT(et);
1473 		return;
1474 	}
1475 select_a_new_ep:
1476 	if (first_in) {
1477 		first_in = 0;
1478 	} else {
1479 		SCTP_INP_RLOCK(it->inp);
1480 	}
1481 	while (((it->pcb_flags) &&
1482 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1483 	    ((it->pcb_features) &&
1484 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1485 		/* endpoint flags or features don't match, so keep looking */
1486 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1487 			SCTP_INP_RUNLOCK(it->inp);
1488 			goto done_with_iterator;
1489 		}
1490 		tinp = it->inp;
1491 		it->inp = LIST_NEXT(it->inp, sctp_list);
1492 		SCTP_INP_RUNLOCK(tinp);
1493 		if (it->inp == NULL) {
1494 			goto done_with_iterator;
1495 		}
1496 		SCTP_INP_RLOCK(it->inp);
1497 	}
1498 	/* now go through each assoc which is in the desired state */
1499 	if (it->done_current_ep == 0) {
1500 		if (it->function_inp != NULL)
1501 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1502 		it->done_current_ep = 1;
1503 	}
1504 	if (it->stcb == NULL) {
1505 		/* run the per instance function */
1506 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1507 	}
1508 	if ((inp_skip) || it->stcb == NULL) {
1509 		if (it->function_inp_end != NULL) {
1510 			inp_skip = (*it->function_inp_end) (it->inp,
1511 			    it->pointer,
1512 			    it->val);
1513 		}
1514 		SCTP_INP_RUNLOCK(it->inp);
1515 		goto no_stcb;
1516 	}
1517 	while (it->stcb) {
1518 		SCTP_TCB_LOCK(it->stcb);
1519 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1520 			/* not in the right state... keep looking */
1521 			SCTP_TCB_UNLOCK(it->stcb);
1522 			goto next_assoc;
1523 		}
1524 		/* see if we have limited out the iterator loop */
1525 		iteration_count++;
1526 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1527 			/* Pause to let others grab the lock */
1528 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1529 			SCTP_TCB_UNLOCK(it->stcb);
1530 			SCTP_INP_INCR_REF(it->inp);
1531 			SCTP_INP_RUNLOCK(it->inp);
1532 			SCTP_ITERATOR_UNLOCK();
1533 			SCTP_INP_INFO_RUNLOCK();
1534 			SCTP_INP_INFO_RLOCK();
1535 			SCTP_ITERATOR_LOCK();
1536 			if (sctp_it_ctl.iterator_flags) {
1537 				/* We won't be staying here */
1538 				SCTP_INP_DECR_REF(it->inp);
1539 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1540 				if (sctp_it_ctl.iterator_flags &
1541 				    SCTP_ITERATOR_STOP_CUR_IT) {
1542 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1543 					goto done_with_iterator;
1544 				}
1545 				if (sctp_it_ctl.iterator_flags &
1546 				    SCTP_ITERATOR_STOP_CUR_INP) {
1547 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1548 					goto no_stcb;
1549 				}
1550 				/* If we reach here huh? */
1551 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1552 				    sctp_it_ctl.iterator_flags);
1553 				sctp_it_ctl.iterator_flags = 0;
1554 			}
1555 			SCTP_INP_RLOCK(it->inp);
1556 			SCTP_INP_DECR_REF(it->inp);
1557 			SCTP_TCB_LOCK(it->stcb);
1558 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1559 			iteration_count = 0;
1560 		}
1561 
1562 		/* run function on this one */
1563 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1564 
1565 		/*
1566 		 * we lie here, it really needs to have its own type but
1567 		 * first I must verify that this won't effect things :-0
1568 		 */
1569 		if (it->no_chunk_output == 0)
1570 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1571 
1572 		SCTP_TCB_UNLOCK(it->stcb);
1573 next_assoc:
1574 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1575 		if (it->stcb == NULL) {
1576 			/* Run last function */
1577 			if (it->function_inp_end != NULL) {
1578 				inp_skip = (*it->function_inp_end) (it->inp,
1579 				    it->pointer,
1580 				    it->val);
1581 			}
1582 		}
1583 	}
1584 	SCTP_INP_RUNLOCK(it->inp);
1585 no_stcb:
1586 	/* done with all assocs on this endpoint, move on to next endpoint */
1587 	it->done_current_ep = 0;
1588 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1589 		it->inp = NULL;
1590 	} else {
1591 		it->inp = LIST_NEXT(it->inp, sctp_list);
1592 	}
1593 	if (it->inp == NULL) {
1594 		goto done_with_iterator;
1595 	}
1596 	goto select_a_new_ep;
1597 }
1598 
1599 void
1600 sctp_iterator_worker(void)
1601 {
1602 	struct sctp_iterator *it;
1603 
1604 	/* This function is called with the WQ lock in place */
1605 	sctp_it_ctl.iterator_running = 1;
1606 	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
1607 		/* now lets work on this one */
1608 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1609 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1610 		CURVNET_SET(it->vn);
1611 		sctp_iterator_work(it);
1612 		CURVNET_RESTORE();
1613 		SCTP_IPI_ITERATOR_WQ_LOCK();
1614 		/* sa_ignore FREED_MEMORY */
1615 	}
1616 	sctp_it_ctl.iterator_running = 0;
1617 	return;
1618 }
1619 
1620 
1621 static void
1622 sctp_handle_addr_wq(void)
1623 {
1624 	/* deal with the ADDR wq from the rtsock calls */
1625 	struct sctp_laddr *wi, *nwi;
1626 	struct sctp_asconf_iterator *asc;
1627 
1628 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1629 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1630 	if (asc == NULL) {
1631 		/* Try later, no memory */
1632 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1633 		    (struct sctp_inpcb *)NULL,
1634 		    (struct sctp_tcb *)NULL,
1635 		    (struct sctp_nets *)NULL);
1636 		return;
1637 	}
1638 	LIST_INIT(&asc->list_of_work);
1639 	asc->cnt = 0;
1640 
1641 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1642 		LIST_REMOVE(wi, sctp_nxt_addr);
1643 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1644 		asc->cnt++;
1645 	}
1646 
1647 	if (asc->cnt == 0) {
1648 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1649 	} else {
1650 		int ret;
1651 
1652 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1653 		    sctp_asconf_iterator_stcb,
1654 		    NULL,	/* No ep end for boundall */
1655 		    SCTP_PCB_FLAGS_BOUNDALL,
1656 		    SCTP_PCB_ANY_FEATURES,
1657 		    SCTP_ASOC_ANY_STATE,
1658 		    (void *)asc, 0,
1659 		    sctp_asconf_iterator_end, NULL, 0);
1660 		if (ret) {
1661 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1662 			/*
1663 			 * Freeing if we are stopping or put back on the
1664 			 * addr_wq.
1665 			 */
1666 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1667 				sctp_asconf_iterator_end(asc, 0);
1668 			} else {
1669 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1670 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1671 				}
1672 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1673 			}
1674 		}
1675 	}
1676 }
1677 
1678 /*-
1679  * The following table shows which pointers for the inp, stcb, or net are
1680  * stored for each timer after it was started.
1681  *
1682  *|Name                         |Timer                        |inp |stcb|net |
1683  *|-----------------------------|-----------------------------|----|----|----|
1684  *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
1685  *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
1686  *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
1687  *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
1688  *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
1689  *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
1690  *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
1691  *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
1692  *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
1693  *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
1694  *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
1695  *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
1696  *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
1697  *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
1698  *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
1699  *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
1700  *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
1701  */
1702 
1703 void
1704 sctp_timeout_handler(void *t)
1705 {
1706 	struct epoch_tracker et;
1707 	struct timeval tv;
1708 	struct sctp_inpcb *inp;
1709 	struct sctp_tcb *stcb;
1710 	struct sctp_nets *net;
1711 	struct sctp_timer *tmr;
1712 	struct mbuf *op_err;
1713 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1714 	struct socket *so;
1715 #endif
1716 	int did_output;
1717 	int type;
1718 	int i, secret;
1719 
1720 	tmr = (struct sctp_timer *)t;
1721 	inp = (struct sctp_inpcb *)tmr->ep;
1722 	stcb = (struct sctp_tcb *)tmr->tcb;
1723 	net = (struct sctp_nets *)tmr->net;
1724 	CURVNET_SET((struct vnet *)tmr->vnet);
1725 	did_output = 1;
1726 
1727 #ifdef SCTP_AUDITING_ENABLED
1728 	sctp_audit_log(0xF0, (uint8_t)tmr->type);
1729 	sctp_auditing(3, inp, stcb, net);
1730 #endif
1731 
1732 	/* sanity checks... */
1733 	KASSERT(tmr->self == tmr, ("tmr->self corrupted"));
1734 	KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type), ("Invalid timer type %d", tmr->type));
1735 	type = tmr->type;
1736 	if (inp) {
1737 		SCTP_INP_INCR_REF(inp);
1738 	}
1739 	tmr->stopped_from = 0xa001;
1740 	if (stcb) {
1741 		atomic_add_int(&stcb->asoc.refcnt, 1);
1742 		if (stcb->asoc.state == 0) {
1743 			atomic_add_int(&stcb->asoc.refcnt, -1);
1744 			if (inp) {
1745 				SCTP_INP_DECR_REF(inp);
1746 			}
1747 			SCTPDBG(SCTP_DEBUG_TIMER2,
1748 			    "Timer type %d handler exiting due to CLOSED association.\n",
1749 			    type);
1750 			CURVNET_RESTORE();
1751 			return;
1752 		}
1753 	}
1754 	tmr->stopped_from = 0xa002;
1755 	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
1756 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1757 		if (inp) {
1758 			SCTP_INP_DECR_REF(inp);
1759 		}
1760 		if (stcb) {
1761 			atomic_add_int(&stcb->asoc.refcnt, -1);
1762 		}
1763 		SCTPDBG(SCTP_DEBUG_TIMER2,
1764 		    "Timer type %d handler exiting due to not being active.\n",
1765 		    type);
1766 		CURVNET_RESTORE();
1767 		return;
1768 	}
1769 
1770 	tmr->stopped_from = 0xa003;
1771 	if (stcb) {
1772 		SCTP_TCB_LOCK(stcb);
1773 		atomic_add_int(&stcb->asoc.refcnt, -1);
1774 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1775 		    ((stcb->asoc.state == 0) ||
1776 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1777 			SCTP_TCB_UNLOCK(stcb);
1778 			if (inp) {
1779 				SCTP_INP_DECR_REF(inp);
1780 			}
1781 			SCTPDBG(SCTP_DEBUG_TIMER2,
1782 			    "Timer type %d handler exiting due to CLOSED association.\n",
1783 			    type);
1784 			CURVNET_RESTORE();
1785 			return;
1786 		}
1787 	} else if (inp != NULL) {
1788 		SCTP_INP_WLOCK(inp);
1789 	} else {
1790 		SCTP_WQ_ADDR_LOCK();
1791 	}
1792 
1793 	/* Record in stopped_from which timeout occurred. */
1794 	tmr->stopped_from = type;
1795 	NET_EPOCH_ENTER(et);
1796 	/* mark as being serviced now */
1797 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1798 		/*
1799 		 * Callout has been rescheduled.
1800 		 */
1801 		goto get_out;
1802 	}
1803 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1804 		/*
1805 		 * Not active, so no action.
1806 		 */
1807 		goto get_out;
1808 	}
1809 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1810 
1811 	/* call the handler for the appropriate timer type */
1812 	switch (type) {
1813 	case SCTP_TIMER_TYPE_SEND:
1814 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1815 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1816 		    type, inp, stcb, net));
1817 		SCTP_STAT_INCR(sctps_timodata);
1818 		stcb->asoc.timodata++;
1819 		stcb->asoc.num_send_timers_up--;
1820 		if (stcb->asoc.num_send_timers_up < 0) {
1821 			stcb->asoc.num_send_timers_up = 0;
1822 		}
1823 		SCTP_TCB_LOCK_ASSERT(stcb);
1824 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1825 			/* no need to unlock on tcb its gone */
1826 
1827 			goto out_decr;
1828 		}
1829 		SCTP_TCB_LOCK_ASSERT(stcb);
1830 #ifdef SCTP_AUDITING_ENABLED
1831 		sctp_auditing(4, inp, stcb, net);
1832 #endif
1833 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1834 		if ((stcb->asoc.num_send_timers_up == 0) &&
1835 		    (stcb->asoc.sent_queue_cnt > 0)) {
1836 			struct sctp_tmit_chunk *chk;
1837 
1838 			/*
1839 			 * safeguard. If there on some on the sent queue
1840 			 * somewhere but no timers running something is
1841 			 * wrong... so we start a timer on the first chunk
1842 			 * on the send queue on whatever net it is sent to.
1843 			 */
1844 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1845 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1846 			    chk->whoTo);
1847 		}
1848 		break;
1849 	case SCTP_TIMER_TYPE_INIT:
1850 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1851 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1852 		    type, inp, stcb, net));
1853 		SCTP_STAT_INCR(sctps_timoinit);
1854 		stcb->asoc.timoinit++;
1855 		if (sctp_t1init_timer(inp, stcb, net)) {
1856 			/* no need to unlock on tcb its gone */
1857 			goto out_decr;
1858 		}
1859 		/* We do output but not here */
1860 		did_output = 0;
1861 		break;
1862 	case SCTP_TIMER_TYPE_RECV:
1863 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
1864 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1865 		    type, inp, stcb, net));
1866 		SCTP_STAT_INCR(sctps_timosack);
1867 		stcb->asoc.timosack++;
1868 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1869 #ifdef SCTP_AUDITING_ENABLED
1870 		sctp_auditing(4, inp, stcb, NULL);
1871 #endif
1872 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1873 		break;
1874 	case SCTP_TIMER_TYPE_SHUTDOWN:
1875 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1876 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1877 		    type, inp, stcb, net));
1878 		SCTP_STAT_INCR(sctps_timoshutdown);
1879 		stcb->asoc.timoshutdown++;
1880 		if (sctp_shutdown_timer(inp, stcb, net)) {
1881 			/* no need to unlock on tcb its gone */
1882 			goto out_decr;
1883 		}
1884 #ifdef SCTP_AUDITING_ENABLED
1885 		sctp_auditing(4, inp, stcb, net);
1886 #endif
1887 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1888 		break;
1889 	case SCTP_TIMER_TYPE_HEARTBEAT:
1890 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1891 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1892 		    type, inp, stcb, net));
1893 		SCTP_STAT_INCR(sctps_timoheartbeat);
1894 		stcb->asoc.timoheartbeat++;
1895 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1896 			/* no need to unlock on tcb its gone */
1897 			goto out_decr;
1898 		}
1899 #ifdef SCTP_AUDITING_ENABLED
1900 		sctp_auditing(4, inp, stcb, net);
1901 #endif
1902 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1903 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1904 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1905 		}
1906 		break;
1907 	case SCTP_TIMER_TYPE_COOKIE:
1908 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1909 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1910 		    type, inp, stcb, net));
1911 		SCTP_STAT_INCR(sctps_timocookie);
1912 		stcb->asoc.timocookie++;
1913 		if (sctp_cookie_timer(inp, stcb, net)) {
1914 			/* no need to unlock on tcb its gone */
1915 			goto out_decr;
1916 		}
1917 #ifdef SCTP_AUDITING_ENABLED
1918 		sctp_auditing(4, inp, stcb, net);
1919 #endif
1920 		/*
1921 		 * We consider T3 and Cookie timer pretty much the same with
1922 		 * respect to where from in chunk_output.
1923 		 */
1924 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1925 		break;
1926 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1927 		KASSERT(inp != NULL && stcb == NULL && net == NULL,
1928 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1929 		    type, inp, stcb, net));
1930 		SCTP_STAT_INCR(sctps_timosecret);
1931 		(void)SCTP_GETTIME_TIMEVAL(&tv);
1932 		inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1933 		inp->sctp_ep.last_secret_number =
1934 		    inp->sctp_ep.current_secret_number;
1935 		inp->sctp_ep.current_secret_number++;
1936 		if (inp->sctp_ep.current_secret_number >=
1937 		    SCTP_HOW_MANY_SECRETS) {
1938 			inp->sctp_ep.current_secret_number = 0;
1939 		}
1940 		secret = (int)inp->sctp_ep.current_secret_number;
1941 		for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1942 			inp->sctp_ep.secret_key[secret][i] =
1943 			    sctp_select_initial_TSN(&inp->sctp_ep);
1944 		}
1945 		sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
1946 		did_output = 0;
1947 		break;
1948 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1949 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1950 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1951 		    type, inp, stcb, net));
1952 		SCTP_STAT_INCR(sctps_timopathmtu);
1953 		sctp_pathmtu_timer(inp, stcb, net);
1954 		did_output = 0;
1955 		break;
1956 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1957 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1958 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1959 		    type, inp, stcb, net));
1960 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1961 			/* no need to unlock on tcb its gone */
1962 			goto out_decr;
1963 		}
1964 		SCTP_STAT_INCR(sctps_timoshutdownack);
1965 		stcb->asoc.timoshutdownack++;
1966 #ifdef SCTP_AUDITING_ENABLED
1967 		sctp_auditing(4, inp, stcb, net);
1968 #endif
1969 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1970 		break;
1971 	case SCTP_TIMER_TYPE_ASCONF:
1972 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1973 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1974 		    type, inp, stcb, net));
1975 		SCTP_STAT_INCR(sctps_timoasconf);
1976 		if (sctp_asconf_timer(inp, stcb, net)) {
1977 			/* no need to unlock on tcb its gone */
1978 			goto out_decr;
1979 		}
1980 #ifdef SCTP_AUDITING_ENABLED
1981 		sctp_auditing(4, inp, stcb, net);
1982 #endif
1983 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1984 		break;
1985 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1986 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
1987 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1988 		    type, inp, stcb, net));
1989 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1990 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1991 		    "Shutdown guard timer expired");
1992 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1993 		/* no need to unlock on tcb its gone */
1994 		goto out_decr;
1995 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1996 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
1997 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1998 		    type, inp, stcb, net));
1999 		SCTP_STAT_INCR(sctps_timoautoclose);
2000 		sctp_autoclose_timer(inp, stcb);
2001 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
2002 		did_output = 0;
2003 		break;
2004 	case SCTP_TIMER_TYPE_STRRESET:
2005 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
2006 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2007 		    type, inp, stcb, net));
2008 		SCTP_STAT_INCR(sctps_timostrmrst);
2009 		if (sctp_strreset_timer(inp, stcb)) {
2010 			/* no need to unlock on tcb its gone */
2011 			goto out_decr;
2012 		}
2013 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
2014 		break;
2015 	case SCTP_TIMER_TYPE_INPKILL:
2016 		KASSERT(inp != NULL && stcb == NULL && net == NULL,
2017 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2018 		    type, inp, stcb, net));
2019 		SCTP_STAT_INCR(sctps_timoinpkill);
2020 		/*
2021 		 * special case, take away our increment since WE are the
2022 		 * killer
2023 		 */
2024 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
2025 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
2026 		SCTP_INP_DECR_REF(inp);
2027 		SCTP_INP_WUNLOCK(inp);
2028 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
2029 		    SCTP_CALLED_FROM_INPKILL_TIMER);
2030 		inp = NULL;
2031 		goto out_no_decr;
2032 	case SCTP_TIMER_TYPE_ASOCKILL:
2033 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
2034 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2035 		    type, inp, stcb, net));
2036 		SCTP_STAT_INCR(sctps_timoassockill);
2037 		/* Can we free it yet? */
2038 		SCTP_INP_DECR_REF(inp);
2039 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
2040 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
2041 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2042 		so = SCTP_INP_SO(inp);
2043 		atomic_add_int(&stcb->asoc.refcnt, 1);
2044 		SCTP_TCB_UNLOCK(stcb);
2045 		SCTP_SOCKET_LOCK(so, 1);
2046 		SCTP_TCB_LOCK(stcb);
2047 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2048 #endif
2049 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2050 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
2051 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2052 		SCTP_SOCKET_UNLOCK(so, 1);
2053 #endif
2054 		/*
2055 		 * free asoc, always unlocks (or destroy's) so prevent
2056 		 * duplicate unlock or unlock of a free mtx :-0
2057 		 */
2058 		stcb = NULL;
2059 		goto out_no_decr;
2060 	case SCTP_TIMER_TYPE_ADDR_WQ:
2061 		KASSERT(inp == NULL && stcb == NULL && net == NULL,
2062 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2063 		    type, inp, stcb, net));
2064 		sctp_handle_addr_wq();
2065 		break;
2066 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2067 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
2068 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2069 		    type, inp, stcb, net));
2070 		SCTP_STAT_INCR(sctps_timodelprim);
2071 		sctp_delete_prim_timer(inp, stcb);
2072 		break;
2073 	default:
2074 #ifdef INVARIANTS
2075 		panic("Unknown timer type %d", type);
2076 #else
2077 		goto get_out;
2078 #endif
2079 	}
2080 #ifdef SCTP_AUDITING_ENABLED
2081 	sctp_audit_log(0xF1, (uint8_t)type);
2082 	if (inp)
2083 		sctp_auditing(5, inp, stcb, net);
2084 #endif
2085 	if ((did_output) && stcb) {
2086 		/*
2087 		 * Now we need to clean up the control chunk chain if an
2088 		 * ECNE is on it. It must be marked as UNSENT again so next
2089 		 * call will continue to send it until such time that we get
2090 		 * a CWR, to remove it. It is, however, less likely that we
2091 		 * will find a ecn echo on the chain though.
2092 		 */
2093 		sctp_fix_ecn_echo(&stcb->asoc);
2094 	}
2095 get_out:
2096 	if (stcb) {
2097 		SCTP_TCB_UNLOCK(stcb);
2098 	} else if (inp != NULL) {
2099 		SCTP_INP_WUNLOCK(inp);
2100 	} else {
2101 		SCTP_WQ_ADDR_UNLOCK();
2102 	}
2103 
2104 out_decr:
2105 	if (inp) {
2106 		SCTP_INP_DECR_REF(inp);
2107 	}
2108 
2109 out_no_decr:
2110 	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type);
2111 	CURVNET_RESTORE();
2112 	NET_EPOCH_EXIT(et);
2113 }
2114 
2115 /*-
2116  * The following table shows which parameters must be provided
2117  * when calling sctp_timer_start(). For parameters not being
2118  * provided, NULL must be used.
2119  *
2120  * |Name                         |inp |stcb|net |
2121  * |-----------------------------|----|----|----|
2122  * |SCTP_TIMER_TYPE_SEND         |Yes |Yes |Yes |
2123  * |SCTP_TIMER_TYPE_INIT         |Yes |Yes |Yes |
2124  * |SCTP_TIMER_TYPE_RECV         |Yes |Yes |No  |
2125  * |SCTP_TIMER_TYPE_SHUTDOWN     |Yes |Yes |Yes |
2126  * |SCTP_TIMER_TYPE_HEARTBEAT    |Yes |Yes |Yes |
2127  * |SCTP_TIMER_TYPE_COOKIE       |Yes |Yes |Yes |
2128  * |SCTP_TIMER_TYPE_NEWCOOKIE    |Yes |No  |No  |
2129  * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
2130  * |SCTP_TIMER_TYPE_SHUTDOWNACK  |Yes |Yes |Yes |
2131  * |SCTP_TIMER_TYPE_ASCONF       |Yes |Yes |Yes |
2132  * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No  |
2133  * |SCTP_TIMER_TYPE_AUTOCLOSE    |Yes |Yes |No  |
2134  * |SCTP_TIMER_TYPE_STRRESET     |Yes |Yes |Yes |
2135  * |SCTP_TIMER_TYPE_INPKILL      |Yes |No  |No  |
2136  * |SCTP_TIMER_TYPE_ASOCKILL     |Yes |Yes |No  |
2137  * |SCTP_TIMER_TYPE_ADDR_WQ      |No  |No  |No  |
2138  * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No  |
2139  *
2140  */
2141 
2142 void
2143 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2144     struct sctp_nets *net)
2145 {
2146 	struct sctp_timer *tmr;
2147 	uint32_t to_ticks;
2148 	uint32_t rndval, jitter;
2149 
2150 	tmr = NULL;
2151 	to_ticks = 0;
2152 	if (stcb != NULL) {
2153 		SCTP_TCB_LOCK_ASSERT(stcb);
2154 	} else if (inp != NULL) {
2155 		SCTP_INP_WLOCK_ASSERT(inp);
2156 	} else {
2157 		SCTP_WQ_ADDR_LOCK_ASSERT();
2158 	}
2159 	if (stcb != NULL) {
2160 		/*
2161 		 * Don't restart timer on association that's about to be
2162 		 * killed.
2163 		 */
2164 		if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
2165 		    (t_type != SCTP_TIMER_TYPE_ASOCKILL)) {
2166 			SCTPDBG(SCTP_DEBUG_TIMER2,
2167 			    "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n",
2168 			    t_type, inp, stcb, net);
2169 			return;
2170 		}
2171 		/* Don't restart timer on net that's been removed. */
2172 		if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) {
2173 			SCTPDBG(SCTP_DEBUG_TIMER2,
2174 			    "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n",
2175 			    t_type, inp, stcb, net);
2176 			return;
2177 		}
2178 	}
2179 	switch (t_type) {
2180 	case SCTP_TIMER_TYPE_SEND:
2181 		/* Here we use the RTO timer. */
2182 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2183 #ifdef INVARIANTS
2184 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2185 			    t_type, inp, stcb, net);
2186 #else
2187 			return;
2188 #endif
2189 		}
2190 		tmr = &net->rxt_timer;
2191 		if (net->RTO == 0) {
2192 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2193 		} else {
2194 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2195 		}
2196 		break;
2197 	case SCTP_TIMER_TYPE_INIT:
2198 		/*
2199 		 * Here we use the INIT timer default usually about 1
2200 		 * second.
2201 		 */
2202 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2203 #ifdef INVARIANTS
2204 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2205 			    t_type, inp, stcb, net);
2206 #else
2207 			return;
2208 #endif
2209 		}
2210 		tmr = &net->rxt_timer;
2211 		if (net->RTO == 0) {
2212 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2213 		} else {
2214 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2215 		}
2216 		break;
2217 	case SCTP_TIMER_TYPE_RECV:
2218 		/*
2219 		 * Here we use the Delayed-Ack timer value from the inp,
2220 		 * ususually about 200ms.
2221 		 */
2222 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2223 #ifdef INVARIANTS
2224 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2225 			    t_type, inp, stcb, net);
2226 #else
2227 			return;
2228 #endif
2229 		}
2230 		tmr = &stcb->asoc.dack_timer;
2231 		to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack);
2232 		break;
2233 	case SCTP_TIMER_TYPE_SHUTDOWN:
2234 		/* Here we use the RTO of the destination. */
2235 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2236 #ifdef INVARIANTS
2237 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2238 			    t_type, inp, stcb, net);
2239 #else
2240 			return;
2241 #endif
2242 		}
2243 		tmr = &net->rxt_timer;
2244 		if (net->RTO == 0) {
2245 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2246 		} else {
2247 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2248 		}
2249 		break;
2250 	case SCTP_TIMER_TYPE_HEARTBEAT:
2251 		/*
2252 		 * The net is used here so that we can add in the RTO. Even
2253 		 * though we use a different timer. We also add the HB timer
2254 		 * PLUS a random jitter.
2255 		 */
2256 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2257 #ifdef INVARIANTS
2258 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2259 			    t_type, inp, stcb, net);
2260 #else
2261 			return;
2262 #endif
2263 		}
2264 		if ((net->dest_state & SCTP_ADDR_NOHB) &&
2265 		    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2266 			SCTPDBG(SCTP_DEBUG_TIMER2,
2267 			    "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
2268 			    t_type, inp, stcb, net);
2269 			return;
2270 		}
2271 		tmr = &net->hb_timer;
2272 		if (net->RTO == 0) {
2273 			to_ticks = stcb->asoc.initial_rto;
2274 		} else {
2275 			to_ticks = net->RTO;
2276 		}
2277 		rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2278 		jitter = rndval % to_ticks;
2279 		if (jitter >= (to_ticks >> 1)) {
2280 			to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2281 		} else {
2282 			to_ticks = to_ticks - jitter;
2283 		}
2284 		if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2285 		    !(net->dest_state & SCTP_ADDR_PF)) {
2286 			to_ticks += net->heart_beat_delay;
2287 		}
2288 		/*
2289 		 * Now we must convert the to_ticks that are now in ms to
2290 		 * ticks.
2291 		 */
2292 		to_ticks = sctp_msecs_to_ticks(to_ticks);
2293 		break;
2294 	case SCTP_TIMER_TYPE_COOKIE:
2295 		/*
2296 		 * Here we can use the RTO timer from the network since one
2297 		 * RTT was complete. If a retransmission happened then we
2298 		 * will be using the RTO initial value.
2299 		 */
2300 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2301 #ifdef INVARIANTS
2302 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2303 			    t_type, inp, stcb, net);
2304 #else
2305 			return;
2306 #endif
2307 		}
2308 		tmr = &net->rxt_timer;
2309 		if (net->RTO == 0) {
2310 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2311 		} else {
2312 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2313 		}
2314 		break;
2315 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2316 		/*
2317 		 * Nothing needed but the endpoint here ususually about 60
2318 		 * minutes.
2319 		 */
2320 		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2321 #ifdef INVARIANTS
2322 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2323 			    t_type, inp, stcb, net);
2324 #else
2325 			return;
2326 #endif
2327 		}
2328 		tmr = &inp->sctp_ep.signature_change;
2329 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2330 		break;
2331 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2332 		/*
2333 		 * Here we use the value found in the EP for PMTUD,
2334 		 * ususually about 10 minutes.
2335 		 */
2336 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2337 #ifdef INVARIANTS
2338 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2339 			    t_type, inp, stcb, net);
2340 #else
2341 			return;
2342 #endif
2343 		}
2344 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2345 			SCTPDBG(SCTP_DEBUG_TIMER2,
2346 			    "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
2347 			    t_type, inp, stcb, net);
2348 			return;
2349 		}
2350 		tmr = &net->pmtu_timer;
2351 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2352 		break;
2353 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2354 		/* Here we use the RTO of the destination. */
2355 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2356 #ifdef INVARIANTS
2357 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2358 			    t_type, inp, stcb, net);
2359 #else
2360 			return;
2361 #endif
2362 		}
2363 		tmr = &net->rxt_timer;
2364 		if (net->RTO == 0) {
2365 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2366 		} else {
2367 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2368 		}
2369 		break;
2370 	case SCTP_TIMER_TYPE_ASCONF:
2371 		/*
2372 		 * Here the timer comes from the stcb but its value is from
2373 		 * the net's RTO.
2374 		 */
2375 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2376 #ifdef INVARIANTS
2377 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2378 			    t_type, inp, stcb, net);
2379 #else
2380 			return;
2381 #endif
2382 		}
2383 		tmr = &stcb->asoc.asconf_timer;
2384 		if (net->RTO == 0) {
2385 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2386 		} else {
2387 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2388 		}
2389 		break;
2390 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2391 		/*
2392 		 * Here we use the endpoints shutdown guard timer usually
2393 		 * about 3 minutes.
2394 		 */
2395 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2396 #ifdef INVARIANTS
2397 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2398 			    t_type, inp, stcb, net);
2399 #else
2400 			return;
2401 #endif
2402 		}
2403 		tmr = &stcb->asoc.shut_guard_timer;
2404 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2405 			if (stcb->asoc.maxrto < UINT32_MAX / 5) {
2406 				to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto);
2407 			} else {
2408 				to_ticks = sctp_msecs_to_ticks(UINT32_MAX);
2409 			}
2410 		} else {
2411 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2412 		}
2413 		break;
2414 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2415 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2416 #ifdef INVARIANTS
2417 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2418 			    t_type, inp, stcb, net);
2419 #else
2420 			return;
2421 #endif
2422 		}
2423 		tmr = &stcb->asoc.autoclose_timer;
2424 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2425 		break;
2426 	case SCTP_TIMER_TYPE_STRRESET:
2427 		/*
2428 		 * Here the timer comes from the stcb but its value is from
2429 		 * the net's RTO.
2430 		 */
2431 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2432 #ifdef INVARIANTS
2433 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2434 			    t_type, inp, stcb, net);
2435 #else
2436 			return;
2437 #endif
2438 		}
2439 		tmr = &stcb->asoc.strreset_timer;
2440 		if (net->RTO == 0) {
2441 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2442 		} else {
2443 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2444 		}
2445 		break;
2446 	case SCTP_TIMER_TYPE_INPKILL:
2447 		/*
2448 		 * The inp is setup to die. We re-use the signature_chage
2449 		 * timer since that has stopped and we are in the GONE
2450 		 * state.
2451 		 */
2452 		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2453 #ifdef INVARIANTS
2454 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2455 			    t_type, inp, stcb, net);
2456 #else
2457 			return;
2458 #endif
2459 		}
2460 		tmr = &inp->sctp_ep.signature_change;
2461 		to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT);
2462 		break;
2463 	case SCTP_TIMER_TYPE_ASOCKILL:
2464 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2465 #ifdef INVARIANTS
2466 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2467 			    t_type, inp, stcb, net);
2468 #else
2469 			return;
2470 #endif
2471 		}
2472 		tmr = &stcb->asoc.strreset_timer;
2473 		to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT);
2474 		break;
2475 	case SCTP_TIMER_TYPE_ADDR_WQ:
2476 		if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
2477 #ifdef INVARIANTS
2478 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2479 			    t_type, inp, stcb, net);
2480 #else
2481 			return;
2482 #endif
2483 		}
2484 		/* Only 1 tick away :-) */
2485 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2486 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
2487 		break;
2488 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2489 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2490 #ifdef INVARIANTS
2491 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2492 			    t_type, inp, stcb, net);
2493 #else
2494 			return;
2495 #endif
2496 		}
2497 		tmr = &stcb->asoc.delete_prim_timer;
2498 		to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2499 		break;
2500 	default:
2501 #ifdef INVARIANTS
2502 		panic("Unknown timer type %d", t_type);
2503 #else
2504 		return;
2505 #endif
2506 	}
2507 	KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
2508 	KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type));
2509 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2510 		/*
2511 		 * We do NOT allow you to have it already running. If it is,
2512 		 * we leave the current one up unchanged.
2513 		 */
2514 		SCTPDBG(SCTP_DEBUG_TIMER2,
2515 		    "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n",
2516 		    t_type, inp, stcb, net);
2517 		return;
2518 	}
2519 	/* At this point we can proceed. */
2520 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2521 		stcb->asoc.num_send_timers_up++;
2522 	}
2523 	tmr->stopped_from = 0;
2524 	tmr->type = t_type;
2525 	tmr->ep = (void *)inp;
2526 	tmr->tcb = (void *)stcb;
2527 	if (t_type == SCTP_TIMER_TYPE_STRRESET) {
2528 		tmr->net = NULL;
2529 	} else {
2530 		tmr->net = (void *)net;
2531 	}
2532 	tmr->self = (void *)tmr;
2533 	tmr->vnet = (void *)curvnet;
2534 	tmr->ticks = sctp_get_tick_count();
2535 	if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) {
2536 		SCTPDBG(SCTP_DEBUG_TIMER2,
2537 		    "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
2538 		    t_type, to_ticks, inp, stcb, net);
2539 	} else {
2540 		/*
2541 		 * This should not happen, since we checked for pending
2542 		 * above.
2543 		 */
2544 		SCTPDBG(SCTP_DEBUG_TIMER2,
2545 		    "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
2546 		    t_type, to_ticks, inp, stcb, net);
2547 	}
2548 	return;
2549 }
2550 
2551 /*-
2552  * The following table shows which parameters must be provided
2553  * when calling sctp_timer_stop(). For parameters not being
2554  * provided, NULL must be used.
2555  *
2556  * |Name                         |inp |stcb|net |
2557  * |-----------------------------|----|----|----|
2558  * |SCTP_TIMER_TYPE_SEND         |Yes |Yes |Yes |
2559  * |SCTP_TIMER_TYPE_INIT         |Yes |Yes |Yes |
2560  * |SCTP_TIMER_TYPE_RECV         |Yes |Yes |No  |
2561  * |SCTP_TIMER_TYPE_SHUTDOWN     |Yes |Yes |Yes |
2562  * |SCTP_TIMER_TYPE_HEARTBEAT    |Yes |Yes |Yes |
2563  * |SCTP_TIMER_TYPE_COOKIE       |Yes |Yes |Yes |
2564  * |SCTP_TIMER_TYPE_NEWCOOKIE    |Yes |No  |No  |
2565  * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
2566  * |SCTP_TIMER_TYPE_SHUTDOWNACK  |Yes |Yes |Yes |
2567  * |SCTP_TIMER_TYPE_ASCONF       |Yes |Yes |No  |
2568  * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No  |
2569  * |SCTP_TIMER_TYPE_AUTOCLOSE    |Yes |Yes |No  |
2570  * |SCTP_TIMER_TYPE_STRRESET     |Yes |Yes |No  |
2571  * |SCTP_TIMER_TYPE_INPKILL      |Yes |No  |No  |
2572  * |SCTP_TIMER_TYPE_ASOCKILL     |Yes |Yes |No  |
2573  * |SCTP_TIMER_TYPE_ADDR_WQ      |No  |No  |No  |
2574  * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No  |
2575  *
2576  */
2577 
2578 void
2579 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2580     struct sctp_nets *net, uint32_t from)
2581 {
2582 	struct sctp_timer *tmr;
2583 
2584 	if (stcb != NULL) {
2585 		SCTP_TCB_LOCK_ASSERT(stcb);
2586 	} else if (inp != NULL) {
2587 		SCTP_INP_WLOCK_ASSERT(inp);
2588 	} else {
2589 		SCTP_WQ_ADDR_LOCK_ASSERT();
2590 	}
2591 	tmr = NULL;
2592 	switch (t_type) {
2593 	case SCTP_TIMER_TYPE_SEND:
2594 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2595 #ifdef INVARIANTS
2596 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2597 			    t_type, inp, stcb, net);
2598 #else
2599 			return;
2600 #endif
2601 		}
2602 		tmr = &net->rxt_timer;
2603 		break;
2604 	case SCTP_TIMER_TYPE_INIT:
2605 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2606 #ifdef INVARIANTS
2607 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2608 			    t_type, inp, stcb, net);
2609 #else
2610 			return;
2611 #endif
2612 		}
2613 		tmr = &net->rxt_timer;
2614 		break;
2615 	case SCTP_TIMER_TYPE_RECV:
2616 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2617 #ifdef INVARIANTS
2618 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2619 			    t_type, inp, stcb, net);
2620 #else
2621 			return;
2622 #endif
2623 		}
2624 		tmr = &stcb->asoc.dack_timer;
2625 		break;
2626 	case SCTP_TIMER_TYPE_SHUTDOWN:
2627 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2628 #ifdef INVARIANTS
2629 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2630 			    t_type, inp, stcb, net);
2631 #else
2632 			return;
2633 #endif
2634 		}
2635 		tmr = &net->rxt_timer;
2636 		break;
2637 	case SCTP_TIMER_TYPE_HEARTBEAT:
2638 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2639 #ifdef INVARIANTS
2640 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2641 			    t_type, inp, stcb, net);
2642 #else
2643 			return;
2644 #endif
2645 		}
2646 		tmr = &net->hb_timer;
2647 		break;
2648 	case SCTP_TIMER_TYPE_COOKIE:
2649 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2650 #ifdef INVARIANTS
2651 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2652 			    t_type, inp, stcb, net);
2653 #else
2654 			return;
2655 #endif
2656 		}
2657 		tmr = &net->rxt_timer;
2658 		break;
2659 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2660 		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2661 #ifdef INVARIANTS
2662 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2663 			    t_type, inp, stcb, net);
2664 #else
2665 			return;
2666 #endif
2667 		}
2668 		tmr = &inp->sctp_ep.signature_change;
2669 		break;
2670 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2671 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2672 #ifdef INVARIANTS
2673 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2674 			    t_type, inp, stcb, net);
2675 #else
2676 			return;
2677 #endif
2678 		}
2679 		tmr = &net->pmtu_timer;
2680 		break;
2681 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2682 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2683 #ifdef INVARIANTS
2684 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2685 			    t_type, inp, stcb, net);
2686 #else
2687 			return;
2688 #endif
2689 		}
2690 		tmr = &net->rxt_timer;
2691 		break;
2692 	case SCTP_TIMER_TYPE_ASCONF:
2693 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2694 #ifdef INVARIANTS
2695 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2696 			    t_type, inp, stcb, net);
2697 #else
2698 			return;
2699 #endif
2700 		}
2701 		tmr = &stcb->asoc.asconf_timer;
2702 		break;
2703 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2704 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2705 #ifdef INVARIANTS
2706 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2707 			    t_type, inp, stcb, net);
2708 #else
2709 			return;
2710 #endif
2711 		}
2712 		tmr = &stcb->asoc.shut_guard_timer;
2713 		break;
2714 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2715 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2716 #ifdef INVARIANTS
2717 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2718 			    t_type, inp, stcb, net);
2719 #else
2720 			return;
2721 #endif
2722 		}
2723 		tmr = &stcb->asoc.autoclose_timer;
2724 		break;
2725 	case SCTP_TIMER_TYPE_STRRESET:
2726 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2727 #ifdef INVARIANTS
2728 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2729 			    t_type, inp, stcb, net);
2730 #else
2731 			return;
2732 #endif
2733 		}
2734 		tmr = &stcb->asoc.strreset_timer;
2735 		break;
2736 	case SCTP_TIMER_TYPE_INPKILL:
2737 		/*
2738 		 * The inp is setup to die. We re-use the signature_chage
2739 		 * timer since that has stopped and we are in the GONE
2740 		 * state.
2741 		 */
2742 		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2743 #ifdef INVARIANTS
2744 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2745 			    t_type, inp, stcb, net);
2746 #else
2747 			return;
2748 #endif
2749 		}
2750 		tmr = &inp->sctp_ep.signature_change;
2751 		break;
2752 	case SCTP_TIMER_TYPE_ASOCKILL:
2753 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2754 #ifdef INVARIANTS
2755 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2756 			    t_type, inp, stcb, net);
2757 #else
2758 			return;
2759 #endif
2760 		}
2761 		tmr = &stcb->asoc.strreset_timer;
2762 		break;
2763 	case SCTP_TIMER_TYPE_ADDR_WQ:
2764 		if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
2765 #ifdef INVARIANTS
2766 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2767 			    t_type, inp, stcb, net);
2768 #else
2769 			return;
2770 #endif
2771 		}
2772 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2773 		break;
2774 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2775 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2776 #ifdef INVARIANTS
2777 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2778 			    t_type, inp, stcb, net);
2779 #else
2780 			return;
2781 #endif
2782 		}
2783 		tmr = &stcb->asoc.delete_prim_timer;
2784 		break;
2785 	default:
2786 #ifdef INVARIANTS
2787 		panic("Unknown timer type %d", t_type);
2788 #else
2789 		return;
2790 #endif
2791 	}
2792 	KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
2793 	if ((tmr->type != SCTP_TIMER_TYPE_NONE) &&
2794 	    (tmr->type != t_type)) {
2795 		/*
2796 		 * Ok we have a timer that is under joint use. Cookie timer
2797 		 * per chance with the SEND timer. We therefore are NOT
2798 		 * running the timer that the caller wants stopped.  So just
2799 		 * return.
2800 		 */
2801 		SCTPDBG(SCTP_DEBUG_TIMER2,
2802 		    "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n",
2803 		    t_type, inp, stcb, net);
2804 		return;
2805 	}
2806 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2807 		stcb->asoc.num_send_timers_up--;
2808 		if (stcb->asoc.num_send_timers_up < 0) {
2809 			stcb->asoc.num_send_timers_up = 0;
2810 		}
2811 	}
2812 	tmr->self = NULL;
2813 	tmr->stopped_from = from;
2814 	if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) {
2815 		KASSERT(tmr->ep == inp,
2816 		    ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p",
2817 		    t_type, inp, tmr->ep));
2818 		KASSERT(tmr->tcb == stcb,
2819 		    ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p",
2820 		    t_type, stcb, tmr->tcb));
2821 		KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) ||
2822 		    ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)),
2823 		    ("sctp_timer_stop of type %d: net = %p, tmr->net = %p",
2824 		    t_type, net, tmr->net));
2825 		SCTPDBG(SCTP_DEBUG_TIMER2,
2826 		    "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n",
2827 		    t_type, inp, stcb, net);
2828 		tmr->ep = NULL;
2829 		tmr->tcb = NULL;
2830 		tmr->net = NULL;
2831 	} else {
2832 		SCTPDBG(SCTP_DEBUG_TIMER2,
2833 		    "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n",
2834 		    t_type, inp, stcb, net);
2835 	}
2836 	return;
2837 }
2838 
2839 uint32_t
2840 sctp_calculate_len(struct mbuf *m)
2841 {
2842 	uint32_t tlen = 0;
2843 	struct mbuf *at;
2844 
2845 	at = m;
2846 	while (at) {
2847 		tlen += SCTP_BUF_LEN(at);
2848 		at = SCTP_BUF_NEXT(at);
2849 	}
2850 	return (tlen);
2851 }
2852 
2853 void
2854 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2855     struct sctp_association *asoc, uint32_t mtu)
2856 {
2857 	/*
2858 	 * Reset the P-MTU size on this association, this involves changing
2859 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2860 	 * allow the DF flag to be cleared.
2861 	 */
2862 	struct sctp_tmit_chunk *chk;
2863 	unsigned int eff_mtu, ovh;
2864 
2865 	asoc->smallest_mtu = mtu;
2866 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2867 		ovh = SCTP_MIN_OVERHEAD;
2868 	} else {
2869 		ovh = SCTP_MIN_V4_OVERHEAD;
2870 	}
2871 	eff_mtu = mtu - ovh;
2872 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2873 		if (chk->send_size > eff_mtu) {
2874 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2875 		}
2876 	}
2877 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2878 		if (chk->send_size > eff_mtu) {
2879 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2880 		}
2881 	}
2882 }
2883 
2884 
2885 /*
2886  * Given an association and starting time of the current RTT period, update
2887  * RTO in number of msecs. net should point to the current network.
2888  * Return 1, if an RTO update was performed, return 0 if no update was
2889  * performed due to invalid starting point.
2890  */
2891 
2892 int
2893 sctp_calculate_rto(struct sctp_tcb *stcb,
2894     struct sctp_association *asoc,
2895     struct sctp_nets *net,
2896     struct timeval *old,
2897     int rtt_from_sack)
2898 {
2899 	struct timeval now;
2900 	uint64_t rtt_us;	/* RTT in us */
2901 	int32_t rtt;		/* RTT in ms */
2902 	uint32_t new_rto;
2903 	int first_measure = 0;
2904 
2905 	/************************/
2906 	/* 1. calculate new RTT */
2907 	/************************/
2908 	/* get the current time */
2909 	if (stcb->asoc.use_precise_time) {
2910 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2911 	} else {
2912 		(void)SCTP_GETTIME_TIMEVAL(&now);
2913 	}
2914 	if ((old->tv_sec > now.tv_sec) ||
2915 	    ((old->tv_sec == now.tv_sec) && (old->tv_sec > now.tv_sec))) {
2916 		/* The starting point is in the future. */
2917 		return (0);
2918 	}
2919 	timevalsub(&now, old);
2920 	rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec;
2921 	if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) {
2922 		/* The RTT is larger than a sane value. */
2923 		return (0);
2924 	}
2925 	/* store the current RTT in us */
2926 	net->rtt = rtt_us;
2927 	/* compute rtt in ms */
2928 	rtt = (int32_t)(net->rtt / 1000);
2929 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2930 		/*
2931 		 * Tell the CC module that a new update has just occurred
2932 		 * from a sack
2933 		 */
2934 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2935 	}
2936 	/*
2937 	 * Do we need to determine the lan? We do this only on sacks i.e.
2938 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2939 	 */
2940 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2941 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2942 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2943 			net->lan_type = SCTP_LAN_INTERNET;
2944 		} else {
2945 			net->lan_type = SCTP_LAN_LOCAL;
2946 		}
2947 	}
2948 
2949 	/***************************/
2950 	/* 2. update RTTVAR & SRTT */
2951 	/***************************/
2952 	/*-
2953 	 * Compute the scaled average lastsa and the
2954 	 * scaled variance lastsv as described in van Jacobson
2955 	 * Paper "Congestion Avoidance and Control", Annex A.
2956 	 *
2957 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2958 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2959 	 */
2960 	if (net->RTO_measured) {
2961 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2962 		net->lastsa += rtt;
2963 		if (rtt < 0) {
2964 			rtt = -rtt;
2965 		}
2966 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2967 		net->lastsv += rtt;
2968 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2969 			rto_logging(net, SCTP_LOG_RTTVAR);
2970 		}
2971 	} else {
2972 		/* First RTO measurment */
2973 		net->RTO_measured = 1;
2974 		first_measure = 1;
2975 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2976 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2977 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2978 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2979 		}
2980 	}
2981 	if (net->lastsv == 0) {
2982 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2983 	}
2984 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2985 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2986 	    (stcb->asoc.sat_network_lockout == 0)) {
2987 		stcb->asoc.sat_network = 1;
2988 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2989 		stcb->asoc.sat_network = 0;
2990 		stcb->asoc.sat_network_lockout = 1;
2991 	}
2992 	/* bound it, per C6/C7 in Section 5.3.1 */
2993 	if (new_rto < stcb->asoc.minrto) {
2994 		new_rto = stcb->asoc.minrto;
2995 	}
2996 	if (new_rto > stcb->asoc.maxrto) {
2997 		new_rto = stcb->asoc.maxrto;
2998 	}
2999 	net->RTO = new_rto;
3000 	return (1);
3001 }
3002 
3003 /*
3004  * return a pointer to a contiguous piece of data from the given mbuf chain
3005  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
3006  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
3007  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
3008  */
3009 caddr_t
3010 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
3011 {
3012 	uint32_t count;
3013 	uint8_t *ptr;
3014 
3015 	ptr = in_ptr;
3016 	if ((off < 0) || (len <= 0))
3017 		return (NULL);
3018 
3019 	/* find the desired start location */
3020 	while ((m != NULL) && (off > 0)) {
3021 		if (off < SCTP_BUF_LEN(m))
3022 			break;
3023 		off -= SCTP_BUF_LEN(m);
3024 		m = SCTP_BUF_NEXT(m);
3025 	}
3026 	if (m == NULL)
3027 		return (NULL);
3028 
3029 	/* is the current mbuf large enough (eg. contiguous)? */
3030 	if ((SCTP_BUF_LEN(m) - off) >= len) {
3031 		return (mtod(m, caddr_t)+off);
3032 	} else {
3033 		/* else, it spans more than one mbuf, so save a temp copy... */
3034 		while ((m != NULL) && (len > 0)) {
3035 			count = min(SCTP_BUF_LEN(m) - off, len);
3036 			memcpy(ptr, mtod(m, caddr_t)+off, count);
3037 			len -= count;
3038 			ptr += count;
3039 			off = 0;
3040 			m = SCTP_BUF_NEXT(m);
3041 		}
3042 		if ((m == NULL) && (len > 0))
3043 			return (NULL);
3044 		else
3045 			return ((caddr_t)in_ptr);
3046 	}
3047 }
3048 
3049 
3050 
3051 struct sctp_paramhdr *
3052 sctp_get_next_param(struct mbuf *m,
3053     int offset,
3054     struct sctp_paramhdr *pull,
3055     int pull_limit)
3056 {
3057 	/* This just provides a typed signature to Peter's Pull routine */
3058 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
3059 	    (uint8_t *)pull));
3060 }
3061 
3062 
3063 struct mbuf *
3064 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
3065 {
3066 	struct mbuf *m_last;
3067 	caddr_t dp;
3068 
3069 	if (padlen > 3) {
3070 		return (NULL);
3071 	}
3072 	if (padlen <= M_TRAILINGSPACE(m)) {
3073 		/*
3074 		 * The easy way. We hope the majority of the time we hit
3075 		 * here :)
3076 		 */
3077 		m_last = m;
3078 	} else {
3079 		/* Hard way we must grow the mbuf chain */
3080 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
3081 		if (m_last == NULL) {
3082 			return (NULL);
3083 		}
3084 		SCTP_BUF_LEN(m_last) = 0;
3085 		SCTP_BUF_NEXT(m_last) = NULL;
3086 		SCTP_BUF_NEXT(m) = m_last;
3087 	}
3088 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
3089 	SCTP_BUF_LEN(m_last) += padlen;
3090 	memset(dp, 0, padlen);
3091 	return (m_last);
3092 }
3093 
3094 struct mbuf *
3095 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
3096 {
3097 	/* find the last mbuf in chain and pad it */
3098 	struct mbuf *m_at;
3099 
3100 	if (last_mbuf != NULL) {
3101 		return (sctp_add_pad_tombuf(last_mbuf, padval));
3102 	} else {
3103 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3104 			if (SCTP_BUF_NEXT(m_at) == NULL) {
3105 				return (sctp_add_pad_tombuf(m_at, padval));
3106 			}
3107 		}
3108 	}
3109 	return (NULL);
3110 }
3111 
3112 static void
3113 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
3114     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
3115 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3116     SCTP_UNUSED
3117 #endif
3118 )
3119 {
3120 	struct mbuf *m_notify;
3121 	struct sctp_assoc_change *sac;
3122 	struct sctp_queued_to_read *control;
3123 	unsigned int notif_len;
3124 	uint16_t abort_len;
3125 	unsigned int i;
3126 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3127 	struct socket *so;
3128 #endif
3129 
3130 	if (stcb == NULL) {
3131 		return;
3132 	}
3133 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
3134 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
3135 		if (abort != NULL) {
3136 			abort_len = ntohs(abort->ch.chunk_length);
3137 			/*
3138 			 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
3139 			 * contiguous.
3140 			 */
3141 			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
3142 				abort_len = SCTP_CHUNK_BUFFER_SIZE;
3143 			}
3144 		} else {
3145 			abort_len = 0;
3146 		}
3147 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
3148 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
3149 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
3150 			notif_len += abort_len;
3151 		}
3152 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3153 		if (m_notify == NULL) {
3154 			/* Retry with smaller value. */
3155 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
3156 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3157 			if (m_notify == NULL) {
3158 				goto set_error;
3159 			}
3160 		}
3161 		SCTP_BUF_NEXT(m_notify) = NULL;
3162 		sac = mtod(m_notify, struct sctp_assoc_change *);
3163 		memset(sac, 0, notif_len);
3164 		sac->sac_type = SCTP_ASSOC_CHANGE;
3165 		sac->sac_flags = 0;
3166 		sac->sac_length = sizeof(struct sctp_assoc_change);
3167 		sac->sac_state = state;
3168 		sac->sac_error = error;
3169 		/* XXX verify these stream counts */
3170 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
3171 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
3172 		sac->sac_assoc_id = sctp_get_associd(stcb);
3173 		if (notif_len > sizeof(struct sctp_assoc_change)) {
3174 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
3175 				i = 0;
3176 				if (stcb->asoc.prsctp_supported == 1) {
3177 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
3178 				}
3179 				if (stcb->asoc.auth_supported == 1) {
3180 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
3181 				}
3182 				if (stcb->asoc.asconf_supported == 1) {
3183 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
3184 				}
3185 				if (stcb->asoc.idata_supported == 1) {
3186 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
3187 				}
3188 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
3189 				if (stcb->asoc.reconfig_supported == 1) {
3190 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
3191 				}
3192 				sac->sac_length += i;
3193 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
3194 				memcpy(sac->sac_info, abort, abort_len);
3195 				sac->sac_length += abort_len;
3196 			}
3197 		}
3198 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
3199 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3200 		    0, 0, stcb->asoc.context, 0, 0, 0,
3201 		    m_notify);
3202 		if (control != NULL) {
3203 			control->length = SCTP_BUF_LEN(m_notify);
3204 			control->spec_flags = M_NOTIFICATION;
3205 			/* not that we need this */
3206 			control->tail_mbuf = m_notify;
3207 			sctp_add_to_readq(stcb->sctp_ep, stcb,
3208 			    control,
3209 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
3210 			    so_locked);
3211 		} else {
3212 			sctp_m_freem(m_notify);
3213 		}
3214 	}
3215 	/*
3216 	 * For 1-to-1 style sockets, we send up and error when an ABORT
3217 	 * comes in.
3218 	 */
3219 set_error:
3220 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3221 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
3222 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
3223 		SOCK_LOCK(stcb->sctp_socket);
3224 		if (from_peer) {
3225 			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
3226 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
3227 				stcb->sctp_socket->so_error = ECONNREFUSED;
3228 			} else {
3229 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
3230 				stcb->sctp_socket->so_error = ECONNRESET;
3231 			}
3232 		} else {
3233 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
3234 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
3235 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
3236 				stcb->sctp_socket->so_error = ETIMEDOUT;
3237 			} else {
3238 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
3239 				stcb->sctp_socket->so_error = ECONNABORTED;
3240 			}
3241 		}
3242 		SOCK_UNLOCK(stcb->sctp_socket);
3243 	}
3244 	/* Wake ANY sleepers */
3245 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3246 	so = SCTP_INP_SO(stcb->sctp_ep);
3247 	if (!so_locked) {
3248 		atomic_add_int(&stcb->asoc.refcnt, 1);
3249 		SCTP_TCB_UNLOCK(stcb);
3250 		SCTP_SOCKET_LOCK(so, 1);
3251 		SCTP_TCB_LOCK(stcb);
3252 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3253 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3254 			SCTP_SOCKET_UNLOCK(so, 1);
3255 			return;
3256 		}
3257 	}
3258 #endif
3259 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3260 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
3261 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
3262 		socantrcvmore(stcb->sctp_socket);
3263 	}
3264 	sorwakeup(stcb->sctp_socket);
3265 	sowwakeup(stcb->sctp_socket);
3266 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3267 	if (!so_locked) {
3268 		SCTP_SOCKET_UNLOCK(so, 1);
3269 	}
3270 #endif
3271 }
3272 
3273 static void
3274 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
3275     struct sockaddr *sa, uint32_t error, int so_locked
3276 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3277     SCTP_UNUSED
3278 #endif
3279 )
3280 {
3281 	struct mbuf *m_notify;
3282 	struct sctp_paddr_change *spc;
3283 	struct sctp_queued_to_read *control;
3284 
3285 	if ((stcb == NULL) ||
3286 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
3287 		/* event not enabled */
3288 		return;
3289 	}
3290 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
3291 	if (m_notify == NULL)
3292 		return;
3293 	SCTP_BUF_LEN(m_notify) = 0;
3294 	spc = mtod(m_notify, struct sctp_paddr_change *);
3295 	memset(spc, 0, sizeof(struct sctp_paddr_change));
3296 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
3297 	spc->spc_flags = 0;
3298 	spc->spc_length = sizeof(struct sctp_paddr_change);
3299 	switch (sa->sa_family) {
3300 #ifdef INET
3301 	case AF_INET:
3302 #ifdef INET6
3303 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
3304 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
3305 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
3306 		} else {
3307 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
3308 		}
3309 #else
3310 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
3311 #endif
3312 		break;
3313 #endif
3314 #ifdef INET6
3315 	case AF_INET6:
3316 		{
3317 			struct sockaddr_in6 *sin6;
3318 
3319 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
3320 
3321 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
3322 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
3323 				if (sin6->sin6_scope_id == 0) {
3324 					/* recover scope_id for user */
3325 					(void)sa6_recoverscope(sin6);
3326 				} else {
3327 					/* clear embedded scope_id for user */
3328 					in6_clearscope(&sin6->sin6_addr);
3329 				}
3330 			}
3331 			break;
3332 		}
3333 #endif
3334 	default:
3335 		/* TSNH */
3336 		break;
3337 	}
3338 	spc->spc_state = state;
3339 	spc->spc_error = error;
3340 	spc->spc_assoc_id = sctp_get_associd(stcb);
3341 
3342 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
3343 	SCTP_BUF_NEXT(m_notify) = NULL;
3344 
3345 	/* append to socket */
3346 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3347 	    0, 0, stcb->asoc.context, 0, 0, 0,
3348 	    m_notify);
3349 	if (control == NULL) {
3350 		/* no memory */
3351 		sctp_m_freem(m_notify);
3352 		return;
3353 	}
3354 	control->length = SCTP_BUF_LEN(m_notify);
3355 	control->spec_flags = M_NOTIFICATION;
3356 	/* not that we need this */
3357 	control->tail_mbuf = m_notify;
3358 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3359 	    control,
3360 	    &stcb->sctp_socket->so_rcv, 1,
3361 	    SCTP_READ_LOCK_NOT_HELD,
3362 	    so_locked);
3363 }
3364 
3365 
3366 static void
3367 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
3368     struct sctp_tmit_chunk *chk, int so_locked
3369 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3370     SCTP_UNUSED
3371 #endif
3372 )
3373 {
3374 	struct mbuf *m_notify;
3375 	struct sctp_send_failed *ssf;
3376 	struct sctp_send_failed_event *ssfe;
3377 	struct sctp_queued_to_read *control;
3378 	struct sctp_chunkhdr *chkhdr;
3379 	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
3380 
3381 	if ((stcb == NULL) ||
3382 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3383 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3384 		/* event not enabled */
3385 		return;
3386 	}
3387 
3388 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3389 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3390 	} else {
3391 		notifhdr_len = sizeof(struct sctp_send_failed);
3392 	}
3393 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3394 	if (m_notify == NULL)
3395 		/* no space left */
3396 		return;
3397 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3398 	if (stcb->asoc.idata_supported) {
3399 		chkhdr_len = sizeof(struct sctp_idata_chunk);
3400 	} else {
3401 		chkhdr_len = sizeof(struct sctp_data_chunk);
3402 	}
3403 	/* Use some defaults in case we can't access the chunk header */
3404 	if (chk->send_size >= chkhdr_len) {
3405 		payload_len = chk->send_size - chkhdr_len;
3406 	} else {
3407 		payload_len = 0;
3408 	}
3409 	padding_len = 0;
3410 	if (chk->data != NULL) {
3411 		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
3412 		if (chkhdr != NULL) {
3413 			chk_len = ntohs(chkhdr->chunk_length);
3414 			if ((chk_len >= chkhdr_len) &&
3415 			    (chk->send_size >= chk_len) &&
3416 			    (chk->send_size - chk_len < 4)) {
3417 				padding_len = chk->send_size - chk_len;
3418 				payload_len = chk->send_size - chkhdr_len - padding_len;
3419 			}
3420 		}
3421 	}
3422 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3423 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3424 		memset(ssfe, 0, notifhdr_len);
3425 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3426 		if (sent) {
3427 			ssfe->ssfe_flags = SCTP_DATA_SENT;
3428 		} else {
3429 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3430 		}
3431 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
3432 		ssfe->ssfe_error = error;
3433 		/* not exactly what the user sent in, but should be close :) */
3434 		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
3435 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
3436 		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
3437 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
3438 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3439 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3440 	} else {
3441 		ssf = mtod(m_notify, struct sctp_send_failed *);
3442 		memset(ssf, 0, notifhdr_len);
3443 		ssf->ssf_type = SCTP_SEND_FAILED;
3444 		if (sent) {
3445 			ssf->ssf_flags = SCTP_DATA_SENT;
3446 		} else {
3447 			ssf->ssf_flags = SCTP_DATA_UNSENT;
3448 		}
3449 		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
3450 		ssf->ssf_error = error;
3451 		/* not exactly what the user sent in, but should be close :) */
3452 		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
3453 		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
3454 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3455 		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
3456 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
3457 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3458 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3459 	}
3460 	if (chk->data != NULL) {
3461 		/* Trim off the sctp chunk header (it should be there) */
3462 		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
3463 			m_adj(chk->data, chkhdr_len);
3464 			m_adj(chk->data, -padding_len);
3465 			sctp_mbuf_crush(chk->data);
3466 			chk->send_size -= (chkhdr_len + padding_len);
3467 		}
3468 	}
3469 	SCTP_BUF_NEXT(m_notify) = chk->data;
3470 	/* Steal off the mbuf */
3471 	chk->data = NULL;
3472 	/*
3473 	 * For this case, we check the actual socket buffer, since the assoc
3474 	 * is going away we don't want to overfill the socket buffer for a
3475 	 * non-reader
3476 	 */
3477 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3478 		sctp_m_freem(m_notify);
3479 		return;
3480 	}
3481 	/* append to socket */
3482 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3483 	    0, 0, stcb->asoc.context, 0, 0, 0,
3484 	    m_notify);
3485 	if (control == NULL) {
3486 		/* no memory */
3487 		sctp_m_freem(m_notify);
3488 		return;
3489 	}
3490 	control->length = SCTP_BUF_LEN(m_notify);
3491 	control->spec_flags = M_NOTIFICATION;
3492 	/* not that we need this */
3493 	control->tail_mbuf = m_notify;
3494 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3495 	    control,
3496 	    &stcb->sctp_socket->so_rcv, 1,
3497 	    SCTP_READ_LOCK_NOT_HELD,
3498 	    so_locked);
3499 }
3500 
3501 
3502 static void
3503 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3504     struct sctp_stream_queue_pending *sp, int so_locked
3505 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3506     SCTP_UNUSED
3507 #endif
3508 )
3509 {
3510 	struct mbuf *m_notify;
3511 	struct sctp_send_failed *ssf;
3512 	struct sctp_send_failed_event *ssfe;
3513 	struct sctp_queued_to_read *control;
3514 	int notifhdr_len;
3515 
3516 	if ((stcb == NULL) ||
3517 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3518 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3519 		/* event not enabled */
3520 		return;
3521 	}
3522 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3523 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3524 	} else {
3525 		notifhdr_len = sizeof(struct sctp_send_failed);
3526 	}
3527 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3528 	if (m_notify == NULL) {
3529 		/* no space left */
3530 		return;
3531 	}
3532 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3533 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3534 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3535 		memset(ssfe, 0, notifhdr_len);
3536 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3537 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3538 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3539 		ssfe->ssfe_error = error;
3540 		/* not exactly what the user sent in, but should be close :) */
3541 		ssfe->ssfe_info.snd_sid = sp->sid;
3542 		if (sp->some_taken) {
3543 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3544 		} else {
3545 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3546 		}
3547 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3548 		ssfe->ssfe_info.snd_context = sp->context;
3549 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3550 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3551 	} else {
3552 		ssf = mtod(m_notify, struct sctp_send_failed *);
3553 		memset(ssf, 0, notifhdr_len);
3554 		ssf->ssf_type = SCTP_SEND_FAILED;
3555 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3556 		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3557 		ssf->ssf_error = error;
3558 		/* not exactly what the user sent in, but should be close :) */
3559 		ssf->ssf_info.sinfo_stream = sp->sid;
3560 		ssf->ssf_info.sinfo_ssn = 0;
3561 		if (sp->some_taken) {
3562 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3563 		} else {
3564 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3565 		}
3566 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3567 		ssf->ssf_info.sinfo_context = sp->context;
3568 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3569 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3570 	}
3571 	SCTP_BUF_NEXT(m_notify) = sp->data;
3572 
3573 	/* Steal off the mbuf */
3574 	sp->data = NULL;
3575 	/*
3576 	 * For this case, we check the actual socket buffer, since the assoc
3577 	 * is going away we don't want to overfill the socket buffer for a
3578 	 * non-reader
3579 	 */
3580 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3581 		sctp_m_freem(m_notify);
3582 		return;
3583 	}
3584 	/* append to socket */
3585 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3586 	    0, 0, stcb->asoc.context, 0, 0, 0,
3587 	    m_notify);
3588 	if (control == NULL) {
3589 		/* no memory */
3590 		sctp_m_freem(m_notify);
3591 		return;
3592 	}
3593 	control->length = SCTP_BUF_LEN(m_notify);
3594 	control->spec_flags = M_NOTIFICATION;
3595 	/* not that we need this */
3596 	control->tail_mbuf = m_notify;
3597 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3598 	    control,
3599 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3600 }
3601 
3602 
3603 
3604 static void
3605 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3606 {
3607 	struct mbuf *m_notify;
3608 	struct sctp_adaptation_event *sai;
3609 	struct sctp_queued_to_read *control;
3610 
3611 	if ((stcb == NULL) ||
3612 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3613 		/* event not enabled */
3614 		return;
3615 	}
3616 
3617 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3618 	if (m_notify == NULL)
3619 		/* no space left */
3620 		return;
3621 	SCTP_BUF_LEN(m_notify) = 0;
3622 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3623 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3624 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3625 	sai->sai_flags = 0;
3626 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3627 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3628 	sai->sai_assoc_id = sctp_get_associd(stcb);
3629 
3630 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3631 	SCTP_BUF_NEXT(m_notify) = NULL;
3632 
3633 	/* append to socket */
3634 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3635 	    0, 0, stcb->asoc.context, 0, 0, 0,
3636 	    m_notify);
3637 	if (control == NULL) {
3638 		/* no memory */
3639 		sctp_m_freem(m_notify);
3640 		return;
3641 	}
3642 	control->length = SCTP_BUF_LEN(m_notify);
3643 	control->spec_flags = M_NOTIFICATION;
3644 	/* not that we need this */
3645 	control->tail_mbuf = m_notify;
3646 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3647 	    control,
3648 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3649 }
3650 
3651 /* This always must be called with the read-queue LOCKED in the INP */
3652 static void
3653 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3654     uint32_t val, int so_locked
3655 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3656     SCTP_UNUSED
3657 #endif
3658 )
3659 {
3660 	struct mbuf *m_notify;
3661 	struct sctp_pdapi_event *pdapi;
3662 	struct sctp_queued_to_read *control;
3663 	struct sockbuf *sb;
3664 
3665 	if ((stcb == NULL) ||
3666 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3667 		/* event not enabled */
3668 		return;
3669 	}
3670 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3671 		return;
3672 	}
3673 
3674 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3675 	if (m_notify == NULL)
3676 		/* no space left */
3677 		return;
3678 	SCTP_BUF_LEN(m_notify) = 0;
3679 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3680 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3681 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3682 	pdapi->pdapi_flags = 0;
3683 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3684 	pdapi->pdapi_indication = error;
3685 	pdapi->pdapi_stream = (val >> 16);
3686 	pdapi->pdapi_seq = (val & 0x0000ffff);
3687 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3688 
3689 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3690 	SCTP_BUF_NEXT(m_notify) = NULL;
3691 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3692 	    0, 0, stcb->asoc.context, 0, 0, 0,
3693 	    m_notify);
3694 	if (control == NULL) {
3695 		/* no memory */
3696 		sctp_m_freem(m_notify);
3697 		return;
3698 	}
3699 	control->length = SCTP_BUF_LEN(m_notify);
3700 	control->spec_flags = M_NOTIFICATION;
3701 	/* not that we need this */
3702 	control->tail_mbuf = m_notify;
3703 	sb = &stcb->sctp_socket->so_rcv;
3704 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3705 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3706 	}
3707 	sctp_sballoc(stcb, sb, m_notify);
3708 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3709 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3710 	}
3711 	control->end_added = 1;
3712 	if (stcb->asoc.control_pdapi)
3713 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3714 	else {
3715 		/* we really should not see this case */
3716 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3717 	}
3718 	if (stcb->sctp_ep && stcb->sctp_socket) {
3719 		/* This should always be the case */
3720 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3721 		struct socket *so;
3722 
3723 		so = SCTP_INP_SO(stcb->sctp_ep);
3724 		if (!so_locked) {
3725 			atomic_add_int(&stcb->asoc.refcnt, 1);
3726 			SCTP_TCB_UNLOCK(stcb);
3727 			SCTP_SOCKET_LOCK(so, 1);
3728 			SCTP_TCB_LOCK(stcb);
3729 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3730 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3731 				SCTP_SOCKET_UNLOCK(so, 1);
3732 				return;
3733 			}
3734 		}
3735 #endif
3736 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3737 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3738 		if (!so_locked) {
3739 			SCTP_SOCKET_UNLOCK(so, 1);
3740 		}
3741 #endif
3742 	}
3743 }
3744 
3745 static void
3746 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3747 {
3748 	struct mbuf *m_notify;
3749 	struct sctp_shutdown_event *sse;
3750 	struct sctp_queued_to_read *control;
3751 
3752 	/*
3753 	 * For TCP model AND UDP connected sockets we will send an error up
3754 	 * when an SHUTDOWN completes
3755 	 */
3756 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3757 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3758 		/* mark socket closed for read/write and wakeup! */
3759 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3760 		struct socket *so;
3761 
3762 		so = SCTP_INP_SO(stcb->sctp_ep);
3763 		atomic_add_int(&stcb->asoc.refcnt, 1);
3764 		SCTP_TCB_UNLOCK(stcb);
3765 		SCTP_SOCKET_LOCK(so, 1);
3766 		SCTP_TCB_LOCK(stcb);
3767 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3768 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3769 			SCTP_SOCKET_UNLOCK(so, 1);
3770 			return;
3771 		}
3772 #endif
3773 		socantsendmore(stcb->sctp_socket);
3774 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3775 		SCTP_SOCKET_UNLOCK(so, 1);
3776 #endif
3777 	}
3778 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3779 		/* event not enabled */
3780 		return;
3781 	}
3782 
3783 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3784 	if (m_notify == NULL)
3785 		/* no space left */
3786 		return;
3787 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3788 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3789 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3790 	sse->sse_flags = 0;
3791 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3792 	sse->sse_assoc_id = sctp_get_associd(stcb);
3793 
3794 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3795 	SCTP_BUF_NEXT(m_notify) = NULL;
3796 
3797 	/* append to socket */
3798 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3799 	    0, 0, stcb->asoc.context, 0, 0, 0,
3800 	    m_notify);
3801 	if (control == NULL) {
3802 		/* no memory */
3803 		sctp_m_freem(m_notify);
3804 		return;
3805 	}
3806 	control->length = SCTP_BUF_LEN(m_notify);
3807 	control->spec_flags = M_NOTIFICATION;
3808 	/* not that we need this */
3809 	control->tail_mbuf = m_notify;
3810 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3811 	    control,
3812 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3813 }
3814 
3815 static void
3816 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3817     int so_locked
3818 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3819     SCTP_UNUSED
3820 #endif
3821 )
3822 {
3823 	struct mbuf *m_notify;
3824 	struct sctp_sender_dry_event *event;
3825 	struct sctp_queued_to_read *control;
3826 
3827 	if ((stcb == NULL) ||
3828 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3829 		/* event not enabled */
3830 		return;
3831 	}
3832 
3833 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3834 	if (m_notify == NULL) {
3835 		/* no space left */
3836 		return;
3837 	}
3838 	SCTP_BUF_LEN(m_notify) = 0;
3839 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3840 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3841 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3842 	event->sender_dry_flags = 0;
3843 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3844 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3845 
3846 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3847 	SCTP_BUF_NEXT(m_notify) = NULL;
3848 
3849 	/* append to socket */
3850 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3851 	    0, 0, stcb->asoc.context, 0, 0, 0,
3852 	    m_notify);
3853 	if (control == NULL) {
3854 		/* no memory */
3855 		sctp_m_freem(m_notify);
3856 		return;
3857 	}
3858 	control->length = SCTP_BUF_LEN(m_notify);
3859 	control->spec_flags = M_NOTIFICATION;
3860 	/* not that we need this */
3861 	control->tail_mbuf = m_notify;
3862 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3863 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3864 }
3865 
3866 
3867 void
3868 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3869 {
3870 	struct mbuf *m_notify;
3871 	struct sctp_queued_to_read *control;
3872 	struct sctp_stream_change_event *stradd;
3873 
3874 	if ((stcb == NULL) ||
3875 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3876 		/* event not enabled */
3877 		return;
3878 	}
3879 	if ((stcb->asoc.peer_req_out) && flag) {
3880 		/* Peer made the request, don't tell the local user */
3881 		stcb->asoc.peer_req_out = 0;
3882 		return;
3883 	}
3884 	stcb->asoc.peer_req_out = 0;
3885 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3886 	if (m_notify == NULL)
3887 		/* no space left */
3888 		return;
3889 	SCTP_BUF_LEN(m_notify) = 0;
3890 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3891 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3892 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3893 	stradd->strchange_flags = flag;
3894 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3895 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3896 	stradd->strchange_instrms = numberin;
3897 	stradd->strchange_outstrms = numberout;
3898 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3899 	SCTP_BUF_NEXT(m_notify) = NULL;
3900 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3901 		/* no space */
3902 		sctp_m_freem(m_notify);
3903 		return;
3904 	}
3905 	/* append to socket */
3906 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3907 	    0, 0, stcb->asoc.context, 0, 0, 0,
3908 	    m_notify);
3909 	if (control == NULL) {
3910 		/* no memory */
3911 		sctp_m_freem(m_notify);
3912 		return;
3913 	}
3914 	control->length = SCTP_BUF_LEN(m_notify);
3915 	control->spec_flags = M_NOTIFICATION;
3916 	/* not that we need this */
3917 	control->tail_mbuf = m_notify;
3918 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3919 	    control,
3920 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3921 }
3922 
3923 void
3924 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3925 {
3926 	struct mbuf *m_notify;
3927 	struct sctp_queued_to_read *control;
3928 	struct sctp_assoc_reset_event *strasoc;
3929 
3930 	if ((stcb == NULL) ||
3931 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3932 		/* event not enabled */
3933 		return;
3934 	}
3935 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3936 	if (m_notify == NULL)
3937 		/* no space left */
3938 		return;
3939 	SCTP_BUF_LEN(m_notify) = 0;
3940 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3941 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3942 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3943 	strasoc->assocreset_flags = flag;
3944 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3945 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3946 	strasoc->assocreset_local_tsn = sending_tsn;
3947 	strasoc->assocreset_remote_tsn = recv_tsn;
3948 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3949 	SCTP_BUF_NEXT(m_notify) = NULL;
3950 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3951 		/* no space */
3952 		sctp_m_freem(m_notify);
3953 		return;
3954 	}
3955 	/* append to socket */
3956 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3957 	    0, 0, stcb->asoc.context, 0, 0, 0,
3958 	    m_notify);
3959 	if (control == NULL) {
3960 		/* no memory */
3961 		sctp_m_freem(m_notify);
3962 		return;
3963 	}
3964 	control->length = SCTP_BUF_LEN(m_notify);
3965 	control->spec_flags = M_NOTIFICATION;
3966 	/* not that we need this */
3967 	control->tail_mbuf = m_notify;
3968 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3969 	    control,
3970 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3971 }
3972 
3973 
3974 
3975 static void
3976 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3977     int number_entries, uint16_t *list, int flag)
3978 {
3979 	struct mbuf *m_notify;
3980 	struct sctp_queued_to_read *control;
3981 	struct sctp_stream_reset_event *strreset;
3982 	int len;
3983 
3984 	if ((stcb == NULL) ||
3985 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3986 		/* event not enabled */
3987 		return;
3988 	}
3989 
3990 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3991 	if (m_notify == NULL)
3992 		/* no space left */
3993 		return;
3994 	SCTP_BUF_LEN(m_notify) = 0;
3995 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3996 	if (len > M_TRAILINGSPACE(m_notify)) {
3997 		/* never enough room */
3998 		sctp_m_freem(m_notify);
3999 		return;
4000 	}
4001 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
4002 	memset(strreset, 0, len);
4003 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
4004 	strreset->strreset_flags = flag;
4005 	strreset->strreset_length = len;
4006 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
4007 	if (number_entries) {
4008 		int i;
4009 
4010 		for (i = 0; i < number_entries; i++) {
4011 			strreset->strreset_stream_list[i] = ntohs(list[i]);
4012 		}
4013 	}
4014 	SCTP_BUF_LEN(m_notify) = len;
4015 	SCTP_BUF_NEXT(m_notify) = NULL;
4016 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
4017 		/* no space */
4018 		sctp_m_freem(m_notify);
4019 		return;
4020 	}
4021 	/* append to socket */
4022 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
4023 	    0, 0, stcb->asoc.context, 0, 0, 0,
4024 	    m_notify);
4025 	if (control == NULL) {
4026 		/* no memory */
4027 		sctp_m_freem(m_notify);
4028 		return;
4029 	}
4030 	control->length = SCTP_BUF_LEN(m_notify);
4031 	control->spec_flags = M_NOTIFICATION;
4032 	/* not that we need this */
4033 	control->tail_mbuf = m_notify;
4034 	sctp_add_to_readq(stcb->sctp_ep, stcb,
4035 	    control,
4036 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
4037 }
4038 
4039 
4040 static void
4041 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
4042 {
4043 	struct mbuf *m_notify;
4044 	struct sctp_remote_error *sre;
4045 	struct sctp_queued_to_read *control;
4046 	unsigned int notif_len;
4047 	uint16_t chunk_len;
4048 
4049 	if ((stcb == NULL) ||
4050 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
4051 		return;
4052 	}
4053 	if (chunk != NULL) {
4054 		chunk_len = ntohs(chunk->ch.chunk_length);
4055 		/*
4056 		 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
4057 		 * contiguous.
4058 		 */
4059 		if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
4060 			chunk_len = SCTP_CHUNK_BUFFER_SIZE;
4061 		}
4062 	} else {
4063 		chunk_len = 0;
4064 	}
4065 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
4066 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
4067 	if (m_notify == NULL) {
4068 		/* Retry with smaller value. */
4069 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
4070 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
4071 		if (m_notify == NULL) {
4072 			return;
4073 		}
4074 	}
4075 	SCTP_BUF_NEXT(m_notify) = NULL;
4076 	sre = mtod(m_notify, struct sctp_remote_error *);
4077 	memset(sre, 0, notif_len);
4078 	sre->sre_type = SCTP_REMOTE_ERROR;
4079 	sre->sre_flags = 0;
4080 	sre->sre_length = sizeof(struct sctp_remote_error);
4081 	sre->sre_error = error;
4082 	sre->sre_assoc_id = sctp_get_associd(stcb);
4083 	if (notif_len > sizeof(struct sctp_remote_error)) {
4084 		memcpy(sre->sre_data, chunk, chunk_len);
4085 		sre->sre_length += chunk_len;
4086 	}
4087 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
4088 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
4089 	    0, 0, stcb->asoc.context, 0, 0, 0,
4090 	    m_notify);
4091 	if (control != NULL) {
4092 		control->length = SCTP_BUF_LEN(m_notify);
4093 		control->spec_flags = M_NOTIFICATION;
4094 		/* not that we need this */
4095 		control->tail_mbuf = m_notify;
4096 		sctp_add_to_readq(stcb->sctp_ep, stcb,
4097 		    control,
4098 		    &stcb->sctp_socket->so_rcv, 1,
4099 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
4100 	} else {
4101 		sctp_m_freem(m_notify);
4102 	}
4103 }
4104 
4105 
4106 void
4107 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
4108     uint32_t error, void *data, int so_locked
4109 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4110     SCTP_UNUSED
4111 #endif
4112 )
4113 {
4114 	if ((stcb == NULL) ||
4115 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4116 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4117 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
4118 		/* If the socket is gone we are out of here */
4119 		return;
4120 	}
4121 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
4122 		return;
4123 	}
4124 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
4125 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
4126 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
4127 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
4128 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
4129 			/* Don't report these in front states */
4130 			return;
4131 		}
4132 	}
4133 	switch (notification) {
4134 	case SCTP_NOTIFY_ASSOC_UP:
4135 		if (stcb->asoc.assoc_up_sent == 0) {
4136 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
4137 			stcb->asoc.assoc_up_sent = 1;
4138 		}
4139 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
4140 			sctp_notify_adaptation_layer(stcb);
4141 		}
4142 		if (stcb->asoc.auth_supported == 0) {
4143 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
4144 			    NULL, so_locked);
4145 		}
4146 		break;
4147 	case SCTP_NOTIFY_ASSOC_DOWN:
4148 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
4149 		break;
4150 	case SCTP_NOTIFY_INTERFACE_DOWN:
4151 		{
4152 			struct sctp_nets *net;
4153 
4154 			net = (struct sctp_nets *)data;
4155 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
4156 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
4157 			break;
4158 		}
4159 	case SCTP_NOTIFY_INTERFACE_UP:
4160 		{
4161 			struct sctp_nets *net;
4162 
4163 			net = (struct sctp_nets *)data;
4164 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
4165 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
4166 			break;
4167 		}
4168 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
4169 		{
4170 			struct sctp_nets *net;
4171 
4172 			net = (struct sctp_nets *)data;
4173 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
4174 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
4175 			break;
4176 		}
4177 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
4178 		sctp_notify_send_failed2(stcb, error,
4179 		    (struct sctp_stream_queue_pending *)data, so_locked);
4180 		break;
4181 	case SCTP_NOTIFY_SENT_DG_FAIL:
4182 		sctp_notify_send_failed(stcb, 1, error,
4183 		    (struct sctp_tmit_chunk *)data, so_locked);
4184 		break;
4185 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
4186 		sctp_notify_send_failed(stcb, 0, error,
4187 		    (struct sctp_tmit_chunk *)data, so_locked);
4188 		break;
4189 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
4190 		{
4191 			uint32_t val;
4192 
4193 			val = *((uint32_t *)data);
4194 
4195 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
4196 			break;
4197 		}
4198 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
4199 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
4200 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
4201 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
4202 		} else {
4203 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
4204 		}
4205 		break;
4206 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
4207 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
4208 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
4209 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
4210 		} else {
4211 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
4212 		}
4213 		break;
4214 	case SCTP_NOTIFY_ASSOC_RESTART:
4215 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
4216 		if (stcb->asoc.auth_supported == 0) {
4217 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
4218 			    NULL, so_locked);
4219 		}
4220 		break;
4221 	case SCTP_NOTIFY_STR_RESET_SEND:
4222 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
4223 		break;
4224 	case SCTP_NOTIFY_STR_RESET_RECV:
4225 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
4226 		break;
4227 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
4228 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
4229 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
4230 		break;
4231 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
4232 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
4233 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
4234 		break;
4235 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
4236 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
4237 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
4238 		break;
4239 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
4240 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
4241 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
4242 		break;
4243 	case SCTP_NOTIFY_ASCONF_ADD_IP:
4244 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
4245 		    error, so_locked);
4246 		break;
4247 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
4248 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
4249 		    error, so_locked);
4250 		break;
4251 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
4252 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
4253 		    error, so_locked);
4254 		break;
4255 	case SCTP_NOTIFY_PEER_SHUTDOWN:
4256 		sctp_notify_shutdown_event(stcb);
4257 		break;
4258 	case SCTP_NOTIFY_AUTH_NEW_KEY:
4259 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
4260 		    (uint16_t)(uintptr_t)data,
4261 		    so_locked);
4262 		break;
4263 	case SCTP_NOTIFY_AUTH_FREE_KEY:
4264 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
4265 		    (uint16_t)(uintptr_t)data,
4266 		    so_locked);
4267 		break;
4268 	case SCTP_NOTIFY_NO_PEER_AUTH:
4269 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
4270 		    (uint16_t)(uintptr_t)data,
4271 		    so_locked);
4272 		break;
4273 	case SCTP_NOTIFY_SENDER_DRY:
4274 		sctp_notify_sender_dry_event(stcb, so_locked);
4275 		break;
4276 	case SCTP_NOTIFY_REMOTE_ERROR:
4277 		sctp_notify_remote_error(stcb, error, data);
4278 		break;
4279 	default:
4280 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
4281 		    __func__, notification, notification);
4282 		break;
4283 	}			/* end switch */
4284 }
4285 
4286 void
4287 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
4288 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4289     SCTP_UNUSED
4290 #endif
4291 )
4292 {
4293 	struct sctp_association *asoc;
4294 	struct sctp_stream_out *outs;
4295 	struct sctp_tmit_chunk *chk, *nchk;
4296 	struct sctp_stream_queue_pending *sp, *nsp;
4297 	int i;
4298 
4299 	if (stcb == NULL) {
4300 		return;
4301 	}
4302 	asoc = &stcb->asoc;
4303 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
4304 		/* already being freed */
4305 		return;
4306 	}
4307 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4308 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4309 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
4310 		return;
4311 	}
4312 	/* now through all the gunk freeing chunks */
4313 	if (holds_lock == 0) {
4314 		SCTP_TCB_SEND_LOCK(stcb);
4315 	}
4316 	/* sent queue SHOULD be empty */
4317 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
4318 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
4319 		asoc->sent_queue_cnt--;
4320 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
4321 			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
4322 				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
4323 #ifdef INVARIANTS
4324 			} else {
4325 				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
4326 #endif
4327 			}
4328 		}
4329 		if (chk->data != NULL) {
4330 			sctp_free_bufspace(stcb, asoc, chk, 1);
4331 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
4332 			    error, chk, so_locked);
4333 			if (chk->data) {
4334 				sctp_m_freem(chk->data);
4335 				chk->data = NULL;
4336 			}
4337 		}
4338 		sctp_free_a_chunk(stcb, chk, so_locked);
4339 		/* sa_ignore FREED_MEMORY */
4340 	}
4341 	/* pending send queue SHOULD be empty */
4342 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
4343 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
4344 		asoc->send_queue_cnt--;
4345 		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
4346 			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
4347 #ifdef INVARIANTS
4348 		} else {
4349 			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
4350 #endif
4351 		}
4352 		if (chk->data != NULL) {
4353 			sctp_free_bufspace(stcb, asoc, chk, 1);
4354 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
4355 			    error, chk, so_locked);
4356 			if (chk->data) {
4357 				sctp_m_freem(chk->data);
4358 				chk->data = NULL;
4359 			}
4360 		}
4361 		sctp_free_a_chunk(stcb, chk, so_locked);
4362 		/* sa_ignore FREED_MEMORY */
4363 	}
4364 	for (i = 0; i < asoc->streamoutcnt; i++) {
4365 		/* For each stream */
4366 		outs = &asoc->strmout[i];
4367 		/* clean up any sends there */
4368 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
4369 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
4370 			TAILQ_REMOVE(&outs->outqueue, sp, next);
4371 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
4372 			sctp_free_spbufspace(stcb, asoc, sp);
4373 			if (sp->data) {
4374 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
4375 				    error, (void *)sp, so_locked);
4376 				if (sp->data) {
4377 					sctp_m_freem(sp->data);
4378 					sp->data = NULL;
4379 					sp->tail_mbuf = NULL;
4380 					sp->length = 0;
4381 				}
4382 			}
4383 			if (sp->net) {
4384 				sctp_free_remote_addr(sp->net);
4385 				sp->net = NULL;
4386 			}
4387 			/* Free the chunk */
4388 			sctp_free_a_strmoq(stcb, sp, so_locked);
4389 			/* sa_ignore FREED_MEMORY */
4390 		}
4391 	}
4392 
4393 	if (holds_lock == 0) {
4394 		SCTP_TCB_SEND_UNLOCK(stcb);
4395 	}
4396 }
4397 
4398 void
4399 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
4400     struct sctp_abort_chunk *abort, int so_locked
4401 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4402     SCTP_UNUSED
4403 #endif
4404 )
4405 {
4406 	if (stcb == NULL) {
4407 		return;
4408 	}
4409 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
4410 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4411 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
4412 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
4413 	}
4414 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4415 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4416 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
4417 		return;
4418 	}
4419 	/* Tell them we lost the asoc */
4420 	sctp_report_all_outbound(stcb, error, 0, so_locked);
4421 	if (from_peer) {
4422 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
4423 	} else {
4424 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
4425 	}
4426 }
4427 
4428 void
4429 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4430     struct mbuf *m, int iphlen,
4431     struct sockaddr *src, struct sockaddr *dst,
4432     struct sctphdr *sh, struct mbuf *op_err,
4433     uint8_t mflowtype, uint32_t mflowid,
4434     uint32_t vrf_id, uint16_t port)
4435 {
4436 	uint32_t vtag;
4437 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4438 	struct socket *so;
4439 #endif
4440 
4441 	vtag = 0;
4442 	if (stcb != NULL) {
4443 		vtag = stcb->asoc.peer_vtag;
4444 		vrf_id = stcb->asoc.vrf_id;
4445 	}
4446 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
4447 	    mflowtype, mflowid, inp->fibnum,
4448 	    vrf_id, port);
4449 	if (stcb != NULL) {
4450 		/* We have a TCB to abort, send notification too */
4451 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
4452 		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
4453 		/* Ok, now lets free it */
4454 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4455 		so = SCTP_INP_SO(inp);
4456 		atomic_add_int(&stcb->asoc.refcnt, 1);
4457 		SCTP_TCB_UNLOCK(stcb);
4458 		SCTP_SOCKET_LOCK(so, 1);
4459 		SCTP_TCB_LOCK(stcb);
4460 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4461 #endif
4462 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4463 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4464 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4465 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4466 		}
4467 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4468 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
4469 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4470 		SCTP_SOCKET_UNLOCK(so, 1);
4471 #endif
4472 	}
4473 }
4474 #ifdef SCTP_ASOCLOG_OF_TSNS
4475 void
4476 sctp_print_out_track_log(struct sctp_tcb *stcb)
4477 {
4478 #ifdef NOSIY_PRINTS
4479 	int i;
4480 
4481 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4482 	SCTP_PRINTF("IN bound TSN log-aaa\n");
4483 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4484 		SCTP_PRINTF("None rcvd\n");
4485 		goto none_in;
4486 	}
4487 	if (stcb->asoc.tsn_in_wrapped) {
4488 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4489 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4490 			    stcb->asoc.in_tsnlog[i].tsn,
4491 			    stcb->asoc.in_tsnlog[i].strm,
4492 			    stcb->asoc.in_tsnlog[i].seq,
4493 			    stcb->asoc.in_tsnlog[i].flgs,
4494 			    stcb->asoc.in_tsnlog[i].sz);
4495 		}
4496 	}
4497 	if (stcb->asoc.tsn_in_at) {
4498 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4499 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4500 			    stcb->asoc.in_tsnlog[i].tsn,
4501 			    stcb->asoc.in_tsnlog[i].strm,
4502 			    stcb->asoc.in_tsnlog[i].seq,
4503 			    stcb->asoc.in_tsnlog[i].flgs,
4504 			    stcb->asoc.in_tsnlog[i].sz);
4505 		}
4506 	}
4507 none_in:
4508 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4509 	if ((stcb->asoc.tsn_out_at == 0) &&
4510 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4511 		SCTP_PRINTF("None sent\n");
4512 	}
4513 	if (stcb->asoc.tsn_out_wrapped) {
4514 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4515 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4516 			    stcb->asoc.out_tsnlog[i].tsn,
4517 			    stcb->asoc.out_tsnlog[i].strm,
4518 			    stcb->asoc.out_tsnlog[i].seq,
4519 			    stcb->asoc.out_tsnlog[i].flgs,
4520 			    stcb->asoc.out_tsnlog[i].sz);
4521 		}
4522 	}
4523 	if (stcb->asoc.tsn_out_at) {
4524 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4525 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4526 			    stcb->asoc.out_tsnlog[i].tsn,
4527 			    stcb->asoc.out_tsnlog[i].strm,
4528 			    stcb->asoc.out_tsnlog[i].seq,
4529 			    stcb->asoc.out_tsnlog[i].flgs,
4530 			    stcb->asoc.out_tsnlog[i].sz);
4531 		}
4532 	}
4533 #endif
4534 }
4535 #endif
4536 
4537 void
4538 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4539     struct mbuf *op_err,
4540     int so_locked
4541 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4542     SCTP_UNUSED
4543 #endif
4544 )
4545 {
4546 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4547 	struct socket *so;
4548 #endif
4549 
4550 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4551 	so = SCTP_INP_SO(inp);
4552 #endif
4553 	if (stcb == NULL) {
4554 		/* Got to have a TCB */
4555 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4556 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4557 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4558 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4559 			}
4560 		}
4561 		return;
4562 	} else {
4563 		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
4564 	}
4565 	/* notify the peer */
4566 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4567 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4568 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4569 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4570 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4571 	}
4572 	/* notify the ulp */
4573 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4574 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4575 	}
4576 	/* now free the asoc */
4577 #ifdef SCTP_ASOCLOG_OF_TSNS
4578 	sctp_print_out_track_log(stcb);
4579 #endif
4580 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4581 	if (!so_locked) {
4582 		atomic_add_int(&stcb->asoc.refcnt, 1);
4583 		SCTP_TCB_UNLOCK(stcb);
4584 		SCTP_SOCKET_LOCK(so, 1);
4585 		SCTP_TCB_LOCK(stcb);
4586 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4587 	}
4588 #endif
4589 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4590 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4591 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4592 	if (!so_locked) {
4593 		SCTP_SOCKET_UNLOCK(so, 1);
4594 	}
4595 #endif
4596 }
4597 
4598 void
4599 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4600     struct sockaddr *src, struct sockaddr *dst,
4601     struct sctphdr *sh, struct sctp_inpcb *inp,
4602     struct mbuf *cause,
4603     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4604     uint32_t vrf_id, uint16_t port)
4605 {
4606 	struct sctp_chunkhdr *ch, chunk_buf;
4607 	unsigned int chk_length;
4608 	int contains_init_chunk;
4609 
4610 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4611 	/* Generate a TO address for future reference */
4612 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4613 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4614 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4615 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4616 		}
4617 	}
4618 	contains_init_chunk = 0;
4619 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4620 	    sizeof(*ch), (uint8_t *)&chunk_buf);
4621 	while (ch != NULL) {
4622 		chk_length = ntohs(ch->chunk_length);
4623 		if (chk_length < sizeof(*ch)) {
4624 			/* break to abort land */
4625 			break;
4626 		}
4627 		switch (ch->chunk_type) {
4628 		case SCTP_INIT:
4629 			contains_init_chunk = 1;
4630 			break;
4631 		case SCTP_PACKET_DROPPED:
4632 			/* we don't respond to pkt-dropped */
4633 			return;
4634 		case SCTP_ABORT_ASSOCIATION:
4635 			/* we don't respond with an ABORT to an ABORT */
4636 			return;
4637 		case SCTP_SHUTDOWN_COMPLETE:
4638 			/*
4639 			 * we ignore it since we are not waiting for it and
4640 			 * peer is gone
4641 			 */
4642 			return;
4643 		case SCTP_SHUTDOWN_ACK:
4644 			sctp_send_shutdown_complete2(src, dst, sh,
4645 			    mflowtype, mflowid, fibnum,
4646 			    vrf_id, port);
4647 			return;
4648 		default:
4649 			break;
4650 		}
4651 		offset += SCTP_SIZE32(chk_length);
4652 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4653 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4654 	}
4655 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4656 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4657 	    (contains_init_chunk == 0))) {
4658 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4659 		    mflowtype, mflowid, fibnum,
4660 		    vrf_id, port);
4661 	}
4662 }
4663 
4664 /*
4665  * check the inbound datagram to make sure there is not an abort inside it,
4666  * if there is return 1, else return 0.
4667  */
4668 int
4669 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4670 {
4671 	struct sctp_chunkhdr *ch;
4672 	struct sctp_init_chunk *init_chk, chunk_buf;
4673 	int offset;
4674 	unsigned int chk_length;
4675 
4676 	offset = iphlen + sizeof(struct sctphdr);
4677 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4678 	    (uint8_t *)&chunk_buf);
4679 	while (ch != NULL) {
4680 		chk_length = ntohs(ch->chunk_length);
4681 		if (chk_length < sizeof(*ch)) {
4682 			/* packet is probably corrupt */
4683 			break;
4684 		}
4685 		/* we seem to be ok, is it an abort? */
4686 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4687 			/* yep, tell them */
4688 			return (1);
4689 		}
4690 		if (ch->chunk_type == SCTP_INITIATION) {
4691 			/* need to update the Vtag */
4692 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4693 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4694 			if (init_chk != NULL) {
4695 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4696 			}
4697 		}
4698 		/* Nope, move to the next chunk */
4699 		offset += SCTP_SIZE32(chk_length);
4700 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4701 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4702 	}
4703 	return (0);
4704 }
4705 
4706 /*
4707  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4708  * set (i.e. it's 0) so, create this function to compare link local scopes
4709  */
4710 #ifdef INET6
4711 uint32_t
4712 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4713 {
4714 	struct sockaddr_in6 a, b;
4715 
4716 	/* save copies */
4717 	a = *addr1;
4718 	b = *addr2;
4719 
4720 	if (a.sin6_scope_id == 0)
4721 		if (sa6_recoverscope(&a)) {
4722 			/* can't get scope, so can't match */
4723 			return (0);
4724 		}
4725 	if (b.sin6_scope_id == 0)
4726 		if (sa6_recoverscope(&b)) {
4727 			/* can't get scope, so can't match */
4728 			return (0);
4729 		}
4730 	if (a.sin6_scope_id != b.sin6_scope_id)
4731 		return (0);
4732 
4733 	return (1);
4734 }
4735 
4736 /*
4737  * returns a sockaddr_in6 with embedded scope recovered and removed
4738  */
4739 struct sockaddr_in6 *
4740 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4741 {
4742 	/* check and strip embedded scope junk */
4743 	if (addr->sin6_family == AF_INET6) {
4744 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4745 			if (addr->sin6_scope_id == 0) {
4746 				*store = *addr;
4747 				if (!sa6_recoverscope(store)) {
4748 					/* use the recovered scope */
4749 					addr = store;
4750 				}
4751 			} else {
4752 				/* else, return the original "to" addr */
4753 				in6_clearscope(&addr->sin6_addr);
4754 			}
4755 		}
4756 	}
4757 	return (addr);
4758 }
4759 #endif
4760 
4761 /*
4762  * are the two addresses the same?  currently a "scopeless" check returns: 1
4763  * if same, 0 if not
4764  */
4765 int
4766 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4767 {
4768 
4769 	/* must be valid */
4770 	if (sa1 == NULL || sa2 == NULL)
4771 		return (0);
4772 
4773 	/* must be the same family */
4774 	if (sa1->sa_family != sa2->sa_family)
4775 		return (0);
4776 
4777 	switch (sa1->sa_family) {
4778 #ifdef INET6
4779 	case AF_INET6:
4780 		{
4781 			/* IPv6 addresses */
4782 			struct sockaddr_in6 *sin6_1, *sin6_2;
4783 
4784 			sin6_1 = (struct sockaddr_in6 *)sa1;
4785 			sin6_2 = (struct sockaddr_in6 *)sa2;
4786 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4787 			    sin6_2));
4788 		}
4789 #endif
4790 #ifdef INET
4791 	case AF_INET:
4792 		{
4793 			/* IPv4 addresses */
4794 			struct sockaddr_in *sin_1, *sin_2;
4795 
4796 			sin_1 = (struct sockaddr_in *)sa1;
4797 			sin_2 = (struct sockaddr_in *)sa2;
4798 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4799 		}
4800 #endif
4801 	default:
4802 		/* we don't do these... */
4803 		return (0);
4804 	}
4805 }
4806 
4807 void
4808 sctp_print_address(struct sockaddr *sa)
4809 {
4810 #ifdef INET6
4811 	char ip6buf[INET6_ADDRSTRLEN];
4812 #endif
4813 
4814 	switch (sa->sa_family) {
4815 #ifdef INET6
4816 	case AF_INET6:
4817 		{
4818 			struct sockaddr_in6 *sin6;
4819 
4820 			sin6 = (struct sockaddr_in6 *)sa;
4821 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4822 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4823 			    ntohs(sin6->sin6_port),
4824 			    sin6->sin6_scope_id);
4825 			break;
4826 		}
4827 #endif
4828 #ifdef INET
4829 	case AF_INET:
4830 		{
4831 			struct sockaddr_in *sin;
4832 			unsigned char *p;
4833 
4834 			sin = (struct sockaddr_in *)sa;
4835 			p = (unsigned char *)&sin->sin_addr;
4836 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4837 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4838 			break;
4839 		}
4840 #endif
4841 	default:
4842 		SCTP_PRINTF("?\n");
4843 		break;
4844 	}
4845 }
4846 
4847 void
4848 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4849     struct sctp_inpcb *new_inp,
4850     struct sctp_tcb *stcb,
4851     int waitflags)
4852 {
4853 	/*
4854 	 * go through our old INP and pull off any control structures that
4855 	 * belong to stcb and move then to the new inp.
4856 	 */
4857 	struct socket *old_so, *new_so;
4858 	struct sctp_queued_to_read *control, *nctl;
4859 	struct sctp_readhead tmp_queue;
4860 	struct mbuf *m;
4861 	int error = 0;
4862 
4863 	old_so = old_inp->sctp_socket;
4864 	new_so = new_inp->sctp_socket;
4865 	TAILQ_INIT(&tmp_queue);
4866 	error = sblock(&old_so->so_rcv, waitflags);
4867 	if (error) {
4868 		/*
4869 		 * Gak, can't get sblock, we have a problem. data will be
4870 		 * left stranded.. and we don't dare look at it since the
4871 		 * other thread may be reading something. Oh well, its a
4872 		 * screwed up app that does a peeloff OR a accept while
4873 		 * reading from the main socket... actually its only the
4874 		 * peeloff() case, since I think read will fail on a
4875 		 * listening socket..
4876 		 */
4877 		return;
4878 	}
4879 	/* lock the socket buffers */
4880 	SCTP_INP_READ_LOCK(old_inp);
4881 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4882 		/* Pull off all for out target stcb */
4883 		if (control->stcb == stcb) {
4884 			/* remove it we want it */
4885 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4886 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4887 			m = control->data;
4888 			while (m) {
4889 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4890 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4891 				}
4892 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4893 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4894 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4895 				}
4896 				m = SCTP_BUF_NEXT(m);
4897 			}
4898 		}
4899 	}
4900 	SCTP_INP_READ_UNLOCK(old_inp);
4901 	/* Remove the sb-lock on the old socket */
4902 
4903 	sbunlock(&old_so->so_rcv);
4904 	/* Now we move them over to the new socket buffer */
4905 	SCTP_INP_READ_LOCK(new_inp);
4906 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4907 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4908 		m = control->data;
4909 		while (m) {
4910 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4911 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4912 			}
4913 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4914 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4915 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4916 			}
4917 			m = SCTP_BUF_NEXT(m);
4918 		}
4919 	}
4920 	SCTP_INP_READ_UNLOCK(new_inp);
4921 }
4922 
4923 void
4924 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4925     struct sctp_tcb *stcb,
4926     int so_locked
4927 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4928     SCTP_UNUSED
4929 #endif
4930 )
4931 {
4932 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4933 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4934 		struct socket *so;
4935 
4936 		so = SCTP_INP_SO(inp);
4937 		if (!so_locked) {
4938 			if (stcb) {
4939 				atomic_add_int(&stcb->asoc.refcnt, 1);
4940 				SCTP_TCB_UNLOCK(stcb);
4941 			}
4942 			SCTP_SOCKET_LOCK(so, 1);
4943 			if (stcb) {
4944 				SCTP_TCB_LOCK(stcb);
4945 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4946 			}
4947 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4948 				SCTP_SOCKET_UNLOCK(so, 1);
4949 				return;
4950 			}
4951 		}
4952 #endif
4953 		sctp_sorwakeup(inp, inp->sctp_socket);
4954 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4955 		if (!so_locked) {
4956 			SCTP_SOCKET_UNLOCK(so, 1);
4957 		}
4958 #endif
4959 	}
4960 }
4961 
4962 void
4963 sctp_add_to_readq(struct sctp_inpcb *inp,
4964     struct sctp_tcb *stcb,
4965     struct sctp_queued_to_read *control,
4966     struct sockbuf *sb,
4967     int end,
4968     int inp_read_lock_held,
4969     int so_locked
4970 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4971     SCTP_UNUSED
4972 #endif
4973 )
4974 {
4975 	/*
4976 	 * Here we must place the control on the end of the socket read
4977 	 * queue AND increment sb_cc so that select will work properly on
4978 	 * read.
4979 	 */
4980 	struct mbuf *m, *prev = NULL;
4981 
4982 	if (inp == NULL) {
4983 		/* Gak, TSNH!! */
4984 #ifdef INVARIANTS
4985 		panic("Gak, inp NULL on add_to_readq");
4986 #endif
4987 		return;
4988 	}
4989 	if (inp_read_lock_held == 0)
4990 		SCTP_INP_READ_LOCK(inp);
4991 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4992 		if (!control->on_strm_q) {
4993 			sctp_free_remote_addr(control->whoFrom);
4994 			if (control->data) {
4995 				sctp_m_freem(control->data);
4996 				control->data = NULL;
4997 			}
4998 			sctp_free_a_readq(stcb, control);
4999 		}
5000 		if (inp_read_lock_held == 0)
5001 			SCTP_INP_READ_UNLOCK(inp);
5002 		return;
5003 	}
5004 	if (!(control->spec_flags & M_NOTIFICATION)) {
5005 		atomic_add_int(&inp->total_recvs, 1);
5006 		if (!control->do_not_ref_stcb) {
5007 			atomic_add_int(&stcb->total_recvs, 1);
5008 		}
5009 	}
5010 	m = control->data;
5011 	control->held_length = 0;
5012 	control->length = 0;
5013 	while (m) {
5014 		if (SCTP_BUF_LEN(m) == 0) {
5015 			/* Skip mbufs with NO length */
5016 			if (prev == NULL) {
5017 				/* First one */
5018 				control->data = sctp_m_free(m);
5019 				m = control->data;
5020 			} else {
5021 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
5022 				m = SCTP_BUF_NEXT(prev);
5023 			}
5024 			if (m == NULL) {
5025 				control->tail_mbuf = prev;
5026 			}
5027 			continue;
5028 		}
5029 		prev = m;
5030 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5031 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
5032 		}
5033 		sctp_sballoc(stcb, sb, m);
5034 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5035 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5036 		}
5037 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
5038 		m = SCTP_BUF_NEXT(m);
5039 	}
5040 	if (prev != NULL) {
5041 		control->tail_mbuf = prev;
5042 	} else {
5043 		/* Everything got collapsed out?? */
5044 		if (!control->on_strm_q) {
5045 			sctp_free_remote_addr(control->whoFrom);
5046 			sctp_free_a_readq(stcb, control);
5047 		}
5048 		if (inp_read_lock_held == 0)
5049 			SCTP_INP_READ_UNLOCK(inp);
5050 		return;
5051 	}
5052 	if (end) {
5053 		control->end_added = 1;
5054 	}
5055 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
5056 	control->on_read_q = 1;
5057 	if (inp_read_lock_held == 0)
5058 		SCTP_INP_READ_UNLOCK(inp);
5059 	if (inp && inp->sctp_socket) {
5060 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
5061 	}
5062 }
5063 
5064 /*************HOLD THIS COMMENT FOR PATCH FILE OF
5065  *************ALTERNATE ROUTING CODE
5066  */
5067 
5068 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
5069  *************ALTERNATE ROUTING CODE
5070  */
5071 
5072 struct mbuf *
5073 sctp_generate_cause(uint16_t code, char *info)
5074 {
5075 	struct mbuf *m;
5076 	struct sctp_gen_error_cause *cause;
5077 	size_t info_len;
5078 	uint16_t len;
5079 
5080 	if ((code == 0) || (info == NULL)) {
5081 		return (NULL);
5082 	}
5083 	info_len = strlen(info);
5084 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
5085 		return (NULL);
5086 	}
5087 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
5088 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5089 	if (m != NULL) {
5090 		SCTP_BUF_LEN(m) = len;
5091 		cause = mtod(m, struct sctp_gen_error_cause *);
5092 		cause->code = htons(code);
5093 		cause->length = htons(len);
5094 		memcpy(cause->info, info, info_len);
5095 	}
5096 	return (m);
5097 }
5098 
5099 struct mbuf *
5100 sctp_generate_no_user_data_cause(uint32_t tsn)
5101 {
5102 	struct mbuf *m;
5103 	struct sctp_error_no_user_data *no_user_data_cause;
5104 	uint16_t len;
5105 
5106 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
5107 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5108 	if (m != NULL) {
5109 		SCTP_BUF_LEN(m) = len;
5110 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
5111 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
5112 		no_user_data_cause->cause.length = htons(len);
5113 		no_user_data_cause->tsn = htonl(tsn);
5114 	}
5115 	return (m);
5116 }
5117 
5118 #ifdef SCTP_MBCNT_LOGGING
5119 void
5120 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
5121     struct sctp_tmit_chunk *tp1, int chk_cnt)
5122 {
5123 	if (tp1->data == NULL) {
5124 		return;
5125 	}
5126 	asoc->chunks_on_out_queue -= chk_cnt;
5127 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
5128 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
5129 		    asoc->total_output_queue_size,
5130 		    tp1->book_size,
5131 		    0,
5132 		    tp1->mbcnt);
5133 	}
5134 	if (asoc->total_output_queue_size >= tp1->book_size) {
5135 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
5136 	} else {
5137 		asoc->total_output_queue_size = 0;
5138 	}
5139 
5140 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
5141 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
5142 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
5143 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
5144 		} else {
5145 			stcb->sctp_socket->so_snd.sb_cc = 0;
5146 
5147 		}
5148 	}
5149 }
5150 
5151 #endif
5152 
5153 int
5154 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
5155     uint8_t sent, int so_locked
5156 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
5157     SCTP_UNUSED
5158 #endif
5159 )
5160 {
5161 	struct sctp_stream_out *strq;
5162 	struct sctp_tmit_chunk *chk = NULL, *tp2;
5163 	struct sctp_stream_queue_pending *sp;
5164 	uint32_t mid;
5165 	uint16_t sid;
5166 	uint8_t foundeom = 0;
5167 	int ret_sz = 0;
5168 	int notdone;
5169 	int do_wakeup_routine = 0;
5170 
5171 	sid = tp1->rec.data.sid;
5172 	mid = tp1->rec.data.mid;
5173 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
5174 		stcb->asoc.abandoned_sent[0]++;
5175 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
5176 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
5177 #if defined(SCTP_DETAILED_STR_STATS)
5178 		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
5179 #endif
5180 	} else {
5181 		stcb->asoc.abandoned_unsent[0]++;
5182 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
5183 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
5184 #if defined(SCTP_DETAILED_STR_STATS)
5185 		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
5186 #endif
5187 	}
5188 	do {
5189 		ret_sz += tp1->book_size;
5190 		if (tp1->data != NULL) {
5191 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5192 				sctp_flight_size_decrease(tp1);
5193 				sctp_total_flight_decrease(stcb, tp1);
5194 			}
5195 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
5196 			stcb->asoc.peers_rwnd += tp1->send_size;
5197 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
5198 			if (sent) {
5199 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
5200 			} else {
5201 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
5202 			}
5203 			if (tp1->data) {
5204 				sctp_m_freem(tp1->data);
5205 				tp1->data = NULL;
5206 			}
5207 			do_wakeup_routine = 1;
5208 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
5209 				stcb->asoc.sent_queue_cnt_removeable--;
5210 			}
5211 		}
5212 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
5213 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
5214 		    SCTP_DATA_NOT_FRAG) {
5215 			/* not frag'ed we ae done   */
5216 			notdone = 0;
5217 			foundeom = 1;
5218 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
5219 			/* end of frag, we are done */
5220 			notdone = 0;
5221 			foundeom = 1;
5222 		} else {
5223 			/*
5224 			 * Its a begin or middle piece, we must mark all of
5225 			 * it
5226 			 */
5227 			notdone = 1;
5228 			tp1 = TAILQ_NEXT(tp1, sctp_next);
5229 		}
5230 	} while (tp1 && notdone);
5231 	if (foundeom == 0) {
5232 		/*
5233 		 * The multi-part message was scattered across the send and
5234 		 * sent queue.
5235 		 */
5236 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
5237 			if ((tp1->rec.data.sid != sid) ||
5238 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
5239 				break;
5240 			}
5241 			/*
5242 			 * save to chk in case we have some on stream out
5243 			 * queue. If so and we have an un-transmitted one we
5244 			 * don't have to fudge the TSN.
5245 			 */
5246 			chk = tp1;
5247 			ret_sz += tp1->book_size;
5248 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
5249 			if (sent) {
5250 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
5251 			} else {
5252 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
5253 			}
5254 			if (tp1->data) {
5255 				sctp_m_freem(tp1->data);
5256 				tp1->data = NULL;
5257 			}
5258 			/* No flight involved here book the size to 0 */
5259 			tp1->book_size = 0;
5260 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
5261 				foundeom = 1;
5262 			}
5263 			do_wakeup_routine = 1;
5264 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
5265 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
5266 			/*
5267 			 * on to the sent queue so we can wait for it to be
5268 			 * passed by.
5269 			 */
5270 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
5271 			    sctp_next);
5272 			stcb->asoc.send_queue_cnt--;
5273 			stcb->asoc.sent_queue_cnt++;
5274 		}
5275 	}
5276 	if (foundeom == 0) {
5277 		/*
5278 		 * Still no eom found. That means there is stuff left on the
5279 		 * stream out queue.. yuck.
5280 		 */
5281 		SCTP_TCB_SEND_LOCK(stcb);
5282 		strq = &stcb->asoc.strmout[sid];
5283 		sp = TAILQ_FIRST(&strq->outqueue);
5284 		if (sp != NULL) {
5285 			sp->discard_rest = 1;
5286 			/*
5287 			 * We may need to put a chunk on the queue that
5288 			 * holds the TSN that would have been sent with the
5289 			 * LAST bit.
5290 			 */
5291 			if (chk == NULL) {
5292 				/* Yep, we have to */
5293 				sctp_alloc_a_chunk(stcb, chk);
5294 				if (chk == NULL) {
5295 					/*
5296 					 * we are hosed. All we can do is
5297 					 * nothing.. which will cause an
5298 					 * abort if the peer is paying
5299 					 * attention.
5300 					 */
5301 					goto oh_well;
5302 				}
5303 				memset(chk, 0, sizeof(*chk));
5304 				chk->rec.data.rcv_flags = 0;
5305 				chk->sent = SCTP_FORWARD_TSN_SKIP;
5306 				chk->asoc = &stcb->asoc;
5307 				if (stcb->asoc.idata_supported == 0) {
5308 					if (sp->sinfo_flags & SCTP_UNORDERED) {
5309 						chk->rec.data.mid = 0;
5310 					} else {
5311 						chk->rec.data.mid = strq->next_mid_ordered;
5312 					}
5313 				} else {
5314 					if (sp->sinfo_flags & SCTP_UNORDERED) {
5315 						chk->rec.data.mid = strq->next_mid_unordered;
5316 					} else {
5317 						chk->rec.data.mid = strq->next_mid_ordered;
5318 					}
5319 				}
5320 				chk->rec.data.sid = sp->sid;
5321 				chk->rec.data.ppid = sp->ppid;
5322 				chk->rec.data.context = sp->context;
5323 				chk->flags = sp->act_flags;
5324 				chk->whoTo = NULL;
5325 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
5326 				strq->chunks_on_queues++;
5327 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
5328 				stcb->asoc.sent_queue_cnt++;
5329 				stcb->asoc.pr_sctp_cnt++;
5330 			}
5331 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
5332 			if (sp->sinfo_flags & SCTP_UNORDERED) {
5333 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
5334 			}
5335 			if (stcb->asoc.idata_supported == 0) {
5336 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
5337 					strq->next_mid_ordered++;
5338 				}
5339 			} else {
5340 				if (sp->sinfo_flags & SCTP_UNORDERED) {
5341 					strq->next_mid_unordered++;
5342 				} else {
5343 					strq->next_mid_ordered++;
5344 				}
5345 			}
5346 	oh_well:
5347 			if (sp->data) {
5348 				/*
5349 				 * Pull any data to free up the SB and allow
5350 				 * sender to "add more" while we will throw
5351 				 * away :-)
5352 				 */
5353 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
5354 				ret_sz += sp->length;
5355 				do_wakeup_routine = 1;
5356 				sp->some_taken = 1;
5357 				sctp_m_freem(sp->data);
5358 				sp->data = NULL;
5359 				sp->tail_mbuf = NULL;
5360 				sp->length = 0;
5361 			}
5362 		}
5363 		SCTP_TCB_SEND_UNLOCK(stcb);
5364 	}
5365 	if (do_wakeup_routine) {
5366 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5367 		struct socket *so;
5368 
5369 		so = SCTP_INP_SO(stcb->sctp_ep);
5370 		if (!so_locked) {
5371 			atomic_add_int(&stcb->asoc.refcnt, 1);
5372 			SCTP_TCB_UNLOCK(stcb);
5373 			SCTP_SOCKET_LOCK(so, 1);
5374 			SCTP_TCB_LOCK(stcb);
5375 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
5376 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5377 				/* assoc was freed while we were unlocked */
5378 				SCTP_SOCKET_UNLOCK(so, 1);
5379 				return (ret_sz);
5380 			}
5381 		}
5382 #endif
5383 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
5384 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5385 		if (!so_locked) {
5386 			SCTP_SOCKET_UNLOCK(so, 1);
5387 		}
5388 #endif
5389 	}
5390 	return (ret_sz);
5391 }
5392 
5393 /*
5394  * checks to see if the given address, sa, is one that is currently known by
5395  * the kernel note: can't distinguish the same address on multiple interfaces
5396  * and doesn't handle multiple addresses with different zone/scope id's note:
5397  * ifa_ifwithaddr() compares the entire sockaddr struct
5398  */
5399 struct sctp_ifa *
5400 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5401     int holds_lock)
5402 {
5403 	struct sctp_laddr *laddr;
5404 
5405 	if (holds_lock == 0) {
5406 		SCTP_INP_RLOCK(inp);
5407 	}
5408 
5409 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5410 		if (laddr->ifa == NULL)
5411 			continue;
5412 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5413 			continue;
5414 #ifdef INET
5415 		if (addr->sa_family == AF_INET) {
5416 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5417 			    laddr->ifa->address.sin.sin_addr.s_addr) {
5418 				/* found him. */
5419 				if (holds_lock == 0) {
5420 					SCTP_INP_RUNLOCK(inp);
5421 				}
5422 				return (laddr->ifa);
5423 				break;
5424 			}
5425 		}
5426 #endif
5427 #ifdef INET6
5428 		if (addr->sa_family == AF_INET6) {
5429 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5430 			    &laddr->ifa->address.sin6)) {
5431 				/* found him. */
5432 				if (holds_lock == 0) {
5433 					SCTP_INP_RUNLOCK(inp);
5434 				}
5435 				return (laddr->ifa);
5436 				break;
5437 			}
5438 		}
5439 #endif
5440 	}
5441 	if (holds_lock == 0) {
5442 		SCTP_INP_RUNLOCK(inp);
5443 	}
5444 	return (NULL);
5445 }
5446 
5447 uint32_t
5448 sctp_get_ifa_hash_val(struct sockaddr *addr)
5449 {
5450 	switch (addr->sa_family) {
5451 #ifdef INET
5452 	case AF_INET:
5453 		{
5454 			struct sockaddr_in *sin;
5455 
5456 			sin = (struct sockaddr_in *)addr;
5457 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5458 		}
5459 #endif
5460 #ifdef INET6
5461 	case AF_INET6:
5462 		{
5463 			struct sockaddr_in6 *sin6;
5464 			uint32_t hash_of_addr;
5465 
5466 			sin6 = (struct sockaddr_in6 *)addr;
5467 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5468 			    sin6->sin6_addr.s6_addr32[1] +
5469 			    sin6->sin6_addr.s6_addr32[2] +
5470 			    sin6->sin6_addr.s6_addr32[3]);
5471 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5472 			return (hash_of_addr);
5473 		}
5474 #endif
5475 	default:
5476 		break;
5477 	}
5478 	return (0);
5479 }
5480 
5481 struct sctp_ifa *
5482 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5483 {
5484 	struct sctp_ifa *sctp_ifap;
5485 	struct sctp_vrf *vrf;
5486 	struct sctp_ifalist *hash_head;
5487 	uint32_t hash_of_addr;
5488 
5489 	if (holds_lock == 0)
5490 		SCTP_IPI_ADDR_RLOCK();
5491 
5492 	vrf = sctp_find_vrf(vrf_id);
5493 	if (vrf == NULL) {
5494 		if (holds_lock == 0)
5495 			SCTP_IPI_ADDR_RUNLOCK();
5496 		return (NULL);
5497 	}
5498 
5499 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5500 
5501 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5502 	if (hash_head == NULL) {
5503 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5504 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5505 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5506 		sctp_print_address(addr);
5507 		SCTP_PRINTF("No such bucket for address\n");
5508 		if (holds_lock == 0)
5509 			SCTP_IPI_ADDR_RUNLOCK();
5510 
5511 		return (NULL);
5512 	}
5513 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5514 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5515 			continue;
5516 #ifdef INET
5517 		if (addr->sa_family == AF_INET) {
5518 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5519 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5520 				/* found him. */
5521 				if (holds_lock == 0)
5522 					SCTP_IPI_ADDR_RUNLOCK();
5523 				return (sctp_ifap);
5524 				break;
5525 			}
5526 		}
5527 #endif
5528 #ifdef INET6
5529 		if (addr->sa_family == AF_INET6) {
5530 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5531 			    &sctp_ifap->address.sin6)) {
5532 				/* found him. */
5533 				if (holds_lock == 0)
5534 					SCTP_IPI_ADDR_RUNLOCK();
5535 				return (sctp_ifap);
5536 				break;
5537 			}
5538 		}
5539 #endif
5540 	}
5541 	if (holds_lock == 0)
5542 		SCTP_IPI_ADDR_RUNLOCK();
5543 	return (NULL);
5544 }
5545 
5546 static void
5547 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
5548     uint32_t rwnd_req)
5549 {
5550 	/* User pulled some data, do we need a rwnd update? */
5551 	struct epoch_tracker et;
5552 	int r_unlocked = 0;
5553 	uint32_t dif, rwnd;
5554 	struct socket *so = NULL;
5555 
5556 	if (stcb == NULL)
5557 		return;
5558 
5559 	atomic_add_int(&stcb->asoc.refcnt, 1);
5560 
5561 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
5562 	    (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
5563 		/* Pre-check If we are freeing no update */
5564 		goto no_lock;
5565 	}
5566 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5567 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5568 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5569 		goto out;
5570 	}
5571 	so = stcb->sctp_socket;
5572 	if (so == NULL) {
5573 		goto out;
5574 	}
5575 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5576 	/* Have you have freed enough to look */
5577 	*freed_so_far = 0;
5578 	/* Yep, its worth a look and the lock overhead */
5579 
5580 	/* Figure out what the rwnd would be */
5581 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5582 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5583 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5584 	} else {
5585 		dif = 0;
5586 	}
5587 	if (dif >= rwnd_req) {
5588 		if (hold_rlock) {
5589 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5590 			r_unlocked = 1;
5591 		}
5592 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5593 			/*
5594 			 * One last check before we allow the guy possibly
5595 			 * to get in. There is a race, where the guy has not
5596 			 * reached the gate. In that case
5597 			 */
5598 			goto out;
5599 		}
5600 		SCTP_TCB_LOCK(stcb);
5601 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5602 			/* No reports here */
5603 			SCTP_TCB_UNLOCK(stcb);
5604 			goto out;
5605 		}
5606 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5607 		NET_EPOCH_ENTER(et);
5608 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5609 
5610 		sctp_chunk_output(stcb->sctp_ep, stcb,
5611 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5612 		/* make sure no timer is running */
5613 		NET_EPOCH_EXIT(et);
5614 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5615 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5616 		SCTP_TCB_UNLOCK(stcb);
5617 	} else {
5618 		/* Update how much we have pending */
5619 		stcb->freed_by_sorcv_sincelast = dif;
5620 	}
5621 out:
5622 	if (so && r_unlocked && hold_rlock) {
5623 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5624 	}
5625 
5626 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5627 no_lock:
5628 	atomic_add_int(&stcb->asoc.refcnt, -1);
5629 	return;
5630 }
5631 
5632 int
5633 sctp_sorecvmsg(struct socket *so,
5634     struct uio *uio,
5635     struct mbuf **mp,
5636     struct sockaddr *from,
5637     int fromlen,
5638     int *msg_flags,
5639     struct sctp_sndrcvinfo *sinfo,
5640     int filling_sinfo)
5641 {
5642 	/*
5643 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5644 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5645 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5646 	 * On the way out we may send out any combination of:
5647 	 * MSG_NOTIFICATION MSG_EOR
5648 	 *
5649 	 */
5650 	struct sctp_inpcb *inp = NULL;
5651 	ssize_t my_len = 0;
5652 	ssize_t cp_len = 0;
5653 	int error = 0;
5654 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5655 	struct mbuf *m = NULL;
5656 	struct sctp_tcb *stcb = NULL;
5657 	int wakeup_read_socket = 0;
5658 	int freecnt_applied = 0;
5659 	int out_flags = 0, in_flags = 0;
5660 	int block_allowed = 1;
5661 	uint32_t freed_so_far = 0;
5662 	ssize_t copied_so_far = 0;
5663 	int in_eeor_mode = 0;
5664 	int no_rcv_needed = 0;
5665 	uint32_t rwnd_req = 0;
5666 	int hold_sblock = 0;
5667 	int hold_rlock = 0;
5668 	ssize_t slen = 0;
5669 	uint32_t held_length = 0;
5670 	int sockbuf_lock = 0;
5671 
5672 	if (uio == NULL) {
5673 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5674 		return (EINVAL);
5675 	}
5676 
5677 	if (msg_flags) {
5678 		in_flags = *msg_flags;
5679 		if (in_flags & MSG_PEEK)
5680 			SCTP_STAT_INCR(sctps_read_peeks);
5681 	} else {
5682 		in_flags = 0;
5683 	}
5684 	slen = uio->uio_resid;
5685 
5686 	/* Pull in and set up our int flags */
5687 	if (in_flags & MSG_OOB) {
5688 		/* Out of band's NOT supported */
5689 		return (EOPNOTSUPP);
5690 	}
5691 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5692 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5693 		return (EINVAL);
5694 	}
5695 	if ((in_flags & (MSG_DONTWAIT
5696 	    | MSG_NBIO
5697 	    )) ||
5698 	    SCTP_SO_IS_NBIO(so)) {
5699 		block_allowed = 0;
5700 	}
5701 	/* setup the endpoint */
5702 	inp = (struct sctp_inpcb *)so->so_pcb;
5703 	if (inp == NULL) {
5704 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5705 		return (EFAULT);
5706 	}
5707 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5708 	/* Must be at least a MTU's worth */
5709 	if (rwnd_req < SCTP_MIN_RWND)
5710 		rwnd_req = SCTP_MIN_RWND;
5711 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5712 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5713 		sctp_misc_ints(SCTP_SORECV_ENTER,
5714 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5715 	}
5716 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5717 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5718 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5719 	}
5720 
5721 
5722 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5723 	if (error) {
5724 		goto release_unlocked;
5725 	}
5726 	sockbuf_lock = 1;
5727 restart:
5728 
5729 
5730 restart_nosblocks:
5731 	if (hold_sblock == 0) {
5732 		SOCKBUF_LOCK(&so->so_rcv);
5733 		hold_sblock = 1;
5734 	}
5735 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5736 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5737 		goto out;
5738 	}
5739 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5740 		if (so->so_error) {
5741 			error = so->so_error;
5742 			if ((in_flags & MSG_PEEK) == 0)
5743 				so->so_error = 0;
5744 			goto out;
5745 		} else {
5746 			if (so->so_rcv.sb_cc == 0) {
5747 				/* indicate EOF */
5748 				error = 0;
5749 				goto out;
5750 			}
5751 		}
5752 	}
5753 	if (so->so_rcv.sb_cc <= held_length) {
5754 		if (so->so_error) {
5755 			error = so->so_error;
5756 			if ((in_flags & MSG_PEEK) == 0) {
5757 				so->so_error = 0;
5758 			}
5759 			goto out;
5760 		}
5761 		if ((so->so_rcv.sb_cc == 0) &&
5762 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5763 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5764 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5765 				/*
5766 				 * For active open side clear flags for
5767 				 * re-use passive open is blocked by
5768 				 * connect.
5769 				 */
5770 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5771 					/*
5772 					 * You were aborted, passive side
5773 					 * always hits here
5774 					 */
5775 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5776 					error = ECONNRESET;
5777 				}
5778 				so->so_state &= ~(SS_ISCONNECTING |
5779 				    SS_ISDISCONNECTING |
5780 				    SS_ISCONFIRMING |
5781 				    SS_ISCONNECTED);
5782 				if (error == 0) {
5783 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5784 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5785 						error = ENOTCONN;
5786 					}
5787 				}
5788 				goto out;
5789 			}
5790 		}
5791 		if (block_allowed) {
5792 			error = sbwait(&so->so_rcv);
5793 			if (error) {
5794 				goto out;
5795 			}
5796 			held_length = 0;
5797 			goto restart_nosblocks;
5798 		} else {
5799 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5800 			error = EWOULDBLOCK;
5801 			goto out;
5802 		}
5803 	}
5804 	if (hold_sblock == 1) {
5805 		SOCKBUF_UNLOCK(&so->so_rcv);
5806 		hold_sblock = 0;
5807 	}
5808 	/* we possibly have data we can read */
5809 	/* sa_ignore FREED_MEMORY */
5810 	control = TAILQ_FIRST(&inp->read_queue);
5811 	if (control == NULL) {
5812 		/*
5813 		 * This could be happening since the appender did the
5814 		 * increment but as not yet did the tailq insert onto the
5815 		 * read_queue
5816 		 */
5817 		if (hold_rlock == 0) {
5818 			SCTP_INP_READ_LOCK(inp);
5819 		}
5820 		control = TAILQ_FIRST(&inp->read_queue);
5821 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5822 #ifdef INVARIANTS
5823 			panic("Huh, its non zero and nothing on control?");
5824 #endif
5825 			so->so_rcv.sb_cc = 0;
5826 		}
5827 		SCTP_INP_READ_UNLOCK(inp);
5828 		hold_rlock = 0;
5829 		goto restart;
5830 	}
5831 
5832 	if ((control->length == 0) &&
5833 	    (control->do_not_ref_stcb)) {
5834 		/*
5835 		 * Clean up code for freeing assoc that left behind a
5836 		 * pdapi.. maybe a peer in EEOR that just closed after
5837 		 * sending and never indicated a EOR.
5838 		 */
5839 		if (hold_rlock == 0) {
5840 			hold_rlock = 1;
5841 			SCTP_INP_READ_LOCK(inp);
5842 		}
5843 		control->held_length = 0;
5844 		if (control->data) {
5845 			/* Hmm there is data here .. fix */
5846 			struct mbuf *m_tmp;
5847 			int cnt = 0;
5848 
5849 			m_tmp = control->data;
5850 			while (m_tmp) {
5851 				cnt += SCTP_BUF_LEN(m_tmp);
5852 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5853 					control->tail_mbuf = m_tmp;
5854 					control->end_added = 1;
5855 				}
5856 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5857 			}
5858 			control->length = cnt;
5859 		} else {
5860 			/* remove it */
5861 			TAILQ_REMOVE(&inp->read_queue, control, next);
5862 			/* Add back any hiddend data */
5863 			sctp_free_remote_addr(control->whoFrom);
5864 			sctp_free_a_readq(stcb, control);
5865 		}
5866 		if (hold_rlock) {
5867 			hold_rlock = 0;
5868 			SCTP_INP_READ_UNLOCK(inp);
5869 		}
5870 		goto restart;
5871 	}
5872 	if ((control->length == 0) &&
5873 	    (control->end_added == 1)) {
5874 		/*
5875 		 * Do we also need to check for (control->pdapi_aborted ==
5876 		 * 1)?
5877 		 */
5878 		if (hold_rlock == 0) {
5879 			hold_rlock = 1;
5880 			SCTP_INP_READ_LOCK(inp);
5881 		}
5882 		TAILQ_REMOVE(&inp->read_queue, control, next);
5883 		if (control->data) {
5884 #ifdef INVARIANTS
5885 			panic("control->data not null but control->length == 0");
5886 #else
5887 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5888 			sctp_m_freem(control->data);
5889 			control->data = NULL;
5890 #endif
5891 		}
5892 		if (control->aux_data) {
5893 			sctp_m_free(control->aux_data);
5894 			control->aux_data = NULL;
5895 		}
5896 #ifdef INVARIANTS
5897 		if (control->on_strm_q) {
5898 			panic("About to free ctl:%p so:%p and its in %d",
5899 			    control, so, control->on_strm_q);
5900 		}
5901 #endif
5902 		sctp_free_remote_addr(control->whoFrom);
5903 		sctp_free_a_readq(stcb, control);
5904 		if (hold_rlock) {
5905 			hold_rlock = 0;
5906 			SCTP_INP_READ_UNLOCK(inp);
5907 		}
5908 		goto restart;
5909 	}
5910 	if (control->length == 0) {
5911 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5912 		    (filling_sinfo)) {
5913 			/* find a more suitable one then this */
5914 			ctl = TAILQ_NEXT(control, next);
5915 			while (ctl) {
5916 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5917 				    (ctl->some_taken ||
5918 				    (ctl->spec_flags & M_NOTIFICATION) ||
5919 				    ((ctl->do_not_ref_stcb == 0) &&
5920 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5921 				    ) {
5922 					/*-
5923 					 * If we have a different TCB next, and there is data
5924 					 * present. If we have already taken some (pdapi), OR we can
5925 					 * ref the tcb and no delivery as started on this stream, we
5926 					 * take it. Note we allow a notification on a different
5927 					 * assoc to be delivered..
5928 					 */
5929 					control = ctl;
5930 					goto found_one;
5931 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5932 					    (ctl->length) &&
5933 					    ((ctl->some_taken) ||
5934 					    ((ctl->do_not_ref_stcb == 0) &&
5935 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5936 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5937 					/*-
5938 					 * If we have the same tcb, and there is data present, and we
5939 					 * have the strm interleave feature present. Then if we have
5940 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5941 					 * not started a delivery for this stream, we can take it.
5942 					 * Note we do NOT allow a notificaiton on the same assoc to
5943 					 * be delivered.
5944 					 */
5945 					control = ctl;
5946 					goto found_one;
5947 				}
5948 				ctl = TAILQ_NEXT(ctl, next);
5949 			}
5950 		}
5951 		/*
5952 		 * if we reach here, not suitable replacement is available
5953 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5954 		 * into the our held count, and its time to sleep again.
5955 		 */
5956 		held_length = so->so_rcv.sb_cc;
5957 		control->held_length = so->so_rcv.sb_cc;
5958 		goto restart;
5959 	}
5960 	/* Clear the held length since there is something to read */
5961 	control->held_length = 0;
5962 found_one:
5963 	/*
5964 	 * If we reach here, control has a some data for us to read off.
5965 	 * Note that stcb COULD be NULL.
5966 	 */
5967 	if (hold_rlock == 0) {
5968 		hold_rlock = 1;
5969 		SCTP_INP_READ_LOCK(inp);
5970 	}
5971 	control->some_taken++;
5972 	stcb = control->stcb;
5973 	if (stcb) {
5974 		if ((control->do_not_ref_stcb == 0) &&
5975 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5976 			if (freecnt_applied == 0)
5977 				stcb = NULL;
5978 		} else if (control->do_not_ref_stcb == 0) {
5979 			/* you can't free it on me please */
5980 			/*
5981 			 * The lock on the socket buffer protects us so the
5982 			 * free code will stop. But since we used the
5983 			 * socketbuf lock and the sender uses the tcb_lock
5984 			 * to increment, we need to use the atomic add to
5985 			 * the refcnt
5986 			 */
5987 			if (freecnt_applied) {
5988 #ifdef INVARIANTS
5989 				panic("refcnt already incremented");
5990 #else
5991 				SCTP_PRINTF("refcnt already incremented?\n");
5992 #endif
5993 			} else {
5994 				atomic_add_int(&stcb->asoc.refcnt, 1);
5995 				freecnt_applied = 1;
5996 			}
5997 			/*
5998 			 * Setup to remember how much we have not yet told
5999 			 * the peer our rwnd has opened up. Note we grab the
6000 			 * value from the tcb from last time. Note too that
6001 			 * sack sending clears this when a sack is sent,
6002 			 * which is fine. Once we hit the rwnd_req, we then
6003 			 * will go to the sctp_user_rcvd() that will not
6004 			 * lock until it KNOWs it MUST send a WUP-SACK.
6005 			 */
6006 			freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast;
6007 			stcb->freed_by_sorcv_sincelast = 0;
6008 		}
6009 	}
6010 	if (stcb &&
6011 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
6012 	    control->do_not_ref_stcb == 0) {
6013 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
6014 	}
6015 
6016 	/* First lets get off the sinfo and sockaddr info */
6017 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
6018 		sinfo->sinfo_stream = control->sinfo_stream;
6019 		sinfo->sinfo_ssn = (uint16_t)control->mid;
6020 		sinfo->sinfo_flags = control->sinfo_flags;
6021 		sinfo->sinfo_ppid = control->sinfo_ppid;
6022 		sinfo->sinfo_context = control->sinfo_context;
6023 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
6024 		sinfo->sinfo_tsn = control->sinfo_tsn;
6025 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
6026 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
6027 		nxt = TAILQ_NEXT(control, next);
6028 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6029 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
6030 			struct sctp_extrcvinfo *s_extra;
6031 
6032 			s_extra = (struct sctp_extrcvinfo *)sinfo;
6033 			if ((nxt) &&
6034 			    (nxt->length)) {
6035 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
6036 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
6037 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
6038 				}
6039 				if (nxt->spec_flags & M_NOTIFICATION) {
6040 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
6041 				}
6042 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
6043 				s_extra->serinfo_next_length = nxt->length;
6044 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
6045 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
6046 				if (nxt->tail_mbuf != NULL) {
6047 					if (nxt->end_added) {
6048 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
6049 					}
6050 				}
6051 			} else {
6052 				/*
6053 				 * we explicitly 0 this, since the memcpy
6054 				 * got some other things beyond the older
6055 				 * sinfo_ that is on the control's structure
6056 				 * :-D
6057 				 */
6058 				nxt = NULL;
6059 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6060 				s_extra->serinfo_next_aid = 0;
6061 				s_extra->serinfo_next_length = 0;
6062 				s_extra->serinfo_next_ppid = 0;
6063 				s_extra->serinfo_next_stream = 0;
6064 			}
6065 		}
6066 		/*
6067 		 * update off the real current cum-ack, if we have an stcb.
6068 		 */
6069 		if ((control->do_not_ref_stcb == 0) && stcb)
6070 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
6071 		/*
6072 		 * mask off the high bits, we keep the actual chunk bits in
6073 		 * there.
6074 		 */
6075 		sinfo->sinfo_flags &= 0x00ff;
6076 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
6077 			sinfo->sinfo_flags |= SCTP_UNORDERED;
6078 		}
6079 	}
6080 #ifdef SCTP_ASOCLOG_OF_TSNS
6081 	{
6082 		int index, newindex;
6083 		struct sctp_pcbtsn_rlog *entry;
6084 
6085 		do {
6086 			index = inp->readlog_index;
6087 			newindex = index + 1;
6088 			if (newindex >= SCTP_READ_LOG_SIZE) {
6089 				newindex = 0;
6090 			}
6091 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
6092 		entry = &inp->readlog[index];
6093 		entry->vtag = control->sinfo_assoc_id;
6094 		entry->strm = control->sinfo_stream;
6095 		entry->seq = (uint16_t)control->mid;
6096 		entry->sz = control->length;
6097 		entry->flgs = control->sinfo_flags;
6098 	}
6099 #endif
6100 	if ((fromlen > 0) && (from != NULL)) {
6101 		union sctp_sockstore store;
6102 		size_t len;
6103 
6104 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
6105 #ifdef INET6
6106 		case AF_INET6:
6107 			len = sizeof(struct sockaddr_in6);
6108 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
6109 			store.sin6.sin6_port = control->port_from;
6110 			break;
6111 #endif
6112 #ifdef INET
6113 		case AF_INET:
6114 #ifdef INET6
6115 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
6116 				len = sizeof(struct sockaddr_in6);
6117 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
6118 				    &store.sin6);
6119 				store.sin6.sin6_port = control->port_from;
6120 			} else {
6121 				len = sizeof(struct sockaddr_in);
6122 				store.sin = control->whoFrom->ro._l_addr.sin;
6123 				store.sin.sin_port = control->port_from;
6124 			}
6125 #else
6126 			len = sizeof(struct sockaddr_in);
6127 			store.sin = control->whoFrom->ro._l_addr.sin;
6128 			store.sin.sin_port = control->port_from;
6129 #endif
6130 			break;
6131 #endif
6132 		default:
6133 			len = 0;
6134 			break;
6135 		}
6136 		memcpy(from, &store, min((size_t)fromlen, len));
6137 #ifdef INET6
6138 		{
6139 			struct sockaddr_in6 lsa6, *from6;
6140 
6141 			from6 = (struct sockaddr_in6 *)from;
6142 			sctp_recover_scope_mac(from6, (&lsa6));
6143 		}
6144 #endif
6145 	}
6146 	if (hold_rlock) {
6147 		SCTP_INP_READ_UNLOCK(inp);
6148 		hold_rlock = 0;
6149 	}
6150 	if (hold_sblock) {
6151 		SOCKBUF_UNLOCK(&so->so_rcv);
6152 		hold_sblock = 0;
6153 	}
6154 	/* now copy out what data we can */
6155 	if (mp == NULL) {
6156 		/* copy out each mbuf in the chain up to length */
6157 get_more_data:
6158 		m = control->data;
6159 		while (m) {
6160 			/* Move out all we can */
6161 			cp_len = uio->uio_resid;
6162 			my_len = SCTP_BUF_LEN(m);
6163 			if (cp_len > my_len) {
6164 				/* not enough in this buf */
6165 				cp_len = my_len;
6166 			}
6167 			if (hold_rlock) {
6168 				SCTP_INP_READ_UNLOCK(inp);
6169 				hold_rlock = 0;
6170 			}
6171 			if (cp_len > 0)
6172 				error = uiomove(mtod(m, char *), (int)cp_len, uio);
6173 			/* re-read */
6174 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
6175 				goto release;
6176 			}
6177 
6178 			if ((control->do_not_ref_stcb == 0) && stcb &&
6179 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
6180 				no_rcv_needed = 1;
6181 			}
6182 			if (error) {
6183 				/* error we are out of here */
6184 				goto release;
6185 			}
6186 			SCTP_INP_READ_LOCK(inp);
6187 			hold_rlock = 1;
6188 			if (cp_len == SCTP_BUF_LEN(m)) {
6189 				if ((SCTP_BUF_NEXT(m) == NULL) &&
6190 				    (control->end_added)) {
6191 					out_flags |= MSG_EOR;
6192 					if ((control->do_not_ref_stcb == 0) &&
6193 					    (control->stcb != NULL) &&
6194 					    ((control->spec_flags & M_NOTIFICATION) == 0))
6195 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6196 				}
6197 				if (control->spec_flags & M_NOTIFICATION) {
6198 					out_flags |= MSG_NOTIFICATION;
6199 				}
6200 				/* we ate up the mbuf */
6201 				if (in_flags & MSG_PEEK) {
6202 					/* just looking */
6203 					m = SCTP_BUF_NEXT(m);
6204 					copied_so_far += cp_len;
6205 				} else {
6206 					/* dispose of the mbuf */
6207 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6208 						sctp_sblog(&so->so_rcv,
6209 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6210 					}
6211 					sctp_sbfree(control, stcb, &so->so_rcv, m);
6212 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6213 						sctp_sblog(&so->so_rcv,
6214 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6215 					}
6216 					copied_so_far += cp_len;
6217 					freed_so_far += (uint32_t)cp_len;
6218 					freed_so_far += MSIZE;
6219 					atomic_subtract_int(&control->length, cp_len);
6220 					control->data = sctp_m_free(m);
6221 					m = control->data;
6222 					/*
6223 					 * been through it all, must hold sb
6224 					 * lock ok to null tail
6225 					 */
6226 					if (control->data == NULL) {
6227 #ifdef INVARIANTS
6228 						if ((control->end_added == 0) ||
6229 						    (TAILQ_NEXT(control, next) == NULL)) {
6230 							/*
6231 							 * If the end is not
6232 							 * added, OR the
6233 							 * next is NOT null
6234 							 * we MUST have the
6235 							 * lock.
6236 							 */
6237 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
6238 								panic("Hmm we don't own the lock?");
6239 							}
6240 						}
6241 #endif
6242 						control->tail_mbuf = NULL;
6243 #ifdef INVARIANTS
6244 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
6245 							panic("end_added, nothing left and no MSG_EOR");
6246 						}
6247 #endif
6248 					}
6249 				}
6250 			} else {
6251 				/* Do we need to trim the mbuf? */
6252 				if (control->spec_flags & M_NOTIFICATION) {
6253 					out_flags |= MSG_NOTIFICATION;
6254 				}
6255 				if ((in_flags & MSG_PEEK) == 0) {
6256 					SCTP_BUF_RESV_UF(m, cp_len);
6257 					SCTP_BUF_LEN(m) -= (int)cp_len;
6258 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6259 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len);
6260 					}
6261 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
6262 					if ((control->do_not_ref_stcb == 0) &&
6263 					    stcb) {
6264 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
6265 					}
6266 					copied_so_far += cp_len;
6267 					freed_so_far += (uint32_t)cp_len;
6268 					freed_so_far += MSIZE;
6269 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6270 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
6271 						    SCTP_LOG_SBRESULT, 0);
6272 					}
6273 					atomic_subtract_int(&control->length, cp_len);
6274 				} else {
6275 					copied_so_far += cp_len;
6276 				}
6277 			}
6278 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
6279 				break;
6280 			}
6281 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6282 			    (control->do_not_ref_stcb == 0) &&
6283 			    (freed_so_far >= rwnd_req)) {
6284 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6285 			}
6286 		}		/* end while(m) */
6287 		/*
6288 		 * At this point we have looked at it all and we either have
6289 		 * a MSG_EOR/or read all the user wants... <OR>
6290 		 * control->length == 0.
6291 		 */
6292 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
6293 			/* we are done with this control */
6294 			if (control->length == 0) {
6295 				if (control->data) {
6296 #ifdef INVARIANTS
6297 					panic("control->data not null at read eor?");
6298 #else
6299 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
6300 					sctp_m_freem(control->data);
6301 					control->data = NULL;
6302 #endif
6303 				}
6304 		done_with_control:
6305 				if (hold_rlock == 0) {
6306 					SCTP_INP_READ_LOCK(inp);
6307 					hold_rlock = 1;
6308 				}
6309 				TAILQ_REMOVE(&inp->read_queue, control, next);
6310 				/* Add back any hiddend data */
6311 				if (control->held_length) {
6312 					held_length = 0;
6313 					control->held_length = 0;
6314 					wakeup_read_socket = 1;
6315 				}
6316 				if (control->aux_data) {
6317 					sctp_m_free(control->aux_data);
6318 					control->aux_data = NULL;
6319 				}
6320 				no_rcv_needed = control->do_not_ref_stcb;
6321 				sctp_free_remote_addr(control->whoFrom);
6322 				control->data = NULL;
6323 #ifdef INVARIANTS
6324 				if (control->on_strm_q) {
6325 					panic("About to free ctl:%p so:%p and its in %d",
6326 					    control, so, control->on_strm_q);
6327 				}
6328 #endif
6329 				sctp_free_a_readq(stcb, control);
6330 				control = NULL;
6331 				if ((freed_so_far >= rwnd_req) &&
6332 				    (no_rcv_needed == 0))
6333 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6334 
6335 			} else {
6336 				/*
6337 				 * The user did not read all of this
6338 				 * message, turn off the returned MSG_EOR
6339 				 * since we are leaving more behind on the
6340 				 * control to read.
6341 				 */
6342 #ifdef INVARIANTS
6343 				if (control->end_added &&
6344 				    (control->data == NULL) &&
6345 				    (control->tail_mbuf == NULL)) {
6346 					panic("Gak, control->length is corrupt?");
6347 				}
6348 #endif
6349 				no_rcv_needed = control->do_not_ref_stcb;
6350 				out_flags &= ~MSG_EOR;
6351 			}
6352 		}
6353 		if (out_flags & MSG_EOR) {
6354 			goto release;
6355 		}
6356 		if ((uio->uio_resid == 0) ||
6357 		    ((in_eeor_mode) &&
6358 		    (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) {
6359 			goto release;
6360 		}
6361 		/*
6362 		 * If I hit here the receiver wants more and this message is
6363 		 * NOT done (pd-api). So two questions. Can we block? if not
6364 		 * we are done. Did the user NOT set MSG_WAITALL?
6365 		 */
6366 		if (block_allowed == 0) {
6367 			goto release;
6368 		}
6369 		/*
6370 		 * We need to wait for more data a few things: - We don't
6371 		 * sbunlock() so we don't get someone else reading. - We
6372 		 * must be sure to account for the case where what is added
6373 		 * is NOT to our control when we wakeup.
6374 		 */
6375 
6376 		/*
6377 		 * Do we need to tell the transport a rwnd update might be
6378 		 * needed before we go to sleep?
6379 		 */
6380 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6381 		    ((freed_so_far >= rwnd_req) &&
6382 		    (control->do_not_ref_stcb == 0) &&
6383 		    (no_rcv_needed == 0))) {
6384 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6385 		}
6386 wait_some_more:
6387 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6388 			goto release;
6389 		}
6390 
6391 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6392 			goto release;
6393 
6394 		if (hold_rlock == 1) {
6395 			SCTP_INP_READ_UNLOCK(inp);
6396 			hold_rlock = 0;
6397 		}
6398 		if (hold_sblock == 0) {
6399 			SOCKBUF_LOCK(&so->so_rcv);
6400 			hold_sblock = 1;
6401 		}
6402 		if ((copied_so_far) && (control->length == 0) &&
6403 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6404 			goto release;
6405 		}
6406 		if (so->so_rcv.sb_cc <= control->held_length) {
6407 			error = sbwait(&so->so_rcv);
6408 			if (error) {
6409 				goto release;
6410 			}
6411 			control->held_length = 0;
6412 		}
6413 		if (hold_sblock) {
6414 			SOCKBUF_UNLOCK(&so->so_rcv);
6415 			hold_sblock = 0;
6416 		}
6417 		if (control->length == 0) {
6418 			/* still nothing here */
6419 			if (control->end_added == 1) {
6420 				/* he aborted, or is done i.e.did a shutdown */
6421 				out_flags |= MSG_EOR;
6422 				if (control->pdapi_aborted) {
6423 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6424 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6425 
6426 					out_flags |= MSG_TRUNC;
6427 				} else {
6428 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6429 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6430 				}
6431 				goto done_with_control;
6432 			}
6433 			if (so->so_rcv.sb_cc > held_length) {
6434 				control->held_length = so->so_rcv.sb_cc;
6435 				held_length = 0;
6436 			}
6437 			goto wait_some_more;
6438 		} else if (control->data == NULL) {
6439 			/*
6440 			 * we must re-sync since data is probably being
6441 			 * added
6442 			 */
6443 			SCTP_INP_READ_LOCK(inp);
6444 			if ((control->length > 0) && (control->data == NULL)) {
6445 				/*
6446 				 * big trouble.. we have the lock and its
6447 				 * corrupt?
6448 				 */
6449 #ifdef INVARIANTS
6450 				panic("Impossible data==NULL length !=0");
6451 #endif
6452 				out_flags |= MSG_EOR;
6453 				out_flags |= MSG_TRUNC;
6454 				control->length = 0;
6455 				SCTP_INP_READ_UNLOCK(inp);
6456 				goto done_with_control;
6457 			}
6458 			SCTP_INP_READ_UNLOCK(inp);
6459 			/* We will fall around to get more data */
6460 		}
6461 		goto get_more_data;
6462 	} else {
6463 		/*-
6464 		 * Give caller back the mbuf chain,
6465 		 * store in uio_resid the length
6466 		 */
6467 		wakeup_read_socket = 0;
6468 		if ((control->end_added == 0) ||
6469 		    (TAILQ_NEXT(control, next) == NULL)) {
6470 			/* Need to get rlock */
6471 			if (hold_rlock == 0) {
6472 				SCTP_INP_READ_LOCK(inp);
6473 				hold_rlock = 1;
6474 			}
6475 		}
6476 		if (control->end_added) {
6477 			out_flags |= MSG_EOR;
6478 			if ((control->do_not_ref_stcb == 0) &&
6479 			    (control->stcb != NULL) &&
6480 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6481 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6482 		}
6483 		if (control->spec_flags & M_NOTIFICATION) {
6484 			out_flags |= MSG_NOTIFICATION;
6485 		}
6486 		uio->uio_resid = control->length;
6487 		*mp = control->data;
6488 		m = control->data;
6489 		while (m) {
6490 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6491 				sctp_sblog(&so->so_rcv,
6492 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6493 			}
6494 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6495 			freed_so_far += (uint32_t)SCTP_BUF_LEN(m);
6496 			freed_so_far += MSIZE;
6497 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6498 				sctp_sblog(&so->so_rcv,
6499 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6500 			}
6501 			m = SCTP_BUF_NEXT(m);
6502 		}
6503 		control->data = control->tail_mbuf = NULL;
6504 		control->length = 0;
6505 		if (out_flags & MSG_EOR) {
6506 			/* Done with this control */
6507 			goto done_with_control;
6508 		}
6509 	}
6510 release:
6511 	if (hold_rlock == 1) {
6512 		SCTP_INP_READ_UNLOCK(inp);
6513 		hold_rlock = 0;
6514 	}
6515 	if (hold_sblock == 1) {
6516 		SOCKBUF_UNLOCK(&so->so_rcv);
6517 		hold_sblock = 0;
6518 	}
6519 
6520 	sbunlock(&so->so_rcv);
6521 	sockbuf_lock = 0;
6522 
6523 release_unlocked:
6524 	if (hold_sblock) {
6525 		SOCKBUF_UNLOCK(&so->so_rcv);
6526 		hold_sblock = 0;
6527 	}
6528 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6529 		if ((freed_so_far >= rwnd_req) &&
6530 		    (control && (control->do_not_ref_stcb == 0)) &&
6531 		    (no_rcv_needed == 0))
6532 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6533 	}
6534 out:
6535 	if (msg_flags) {
6536 		*msg_flags = out_flags;
6537 	}
6538 	if (((out_flags & MSG_EOR) == 0) &&
6539 	    ((in_flags & MSG_PEEK) == 0) &&
6540 	    (sinfo) &&
6541 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6542 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6543 		struct sctp_extrcvinfo *s_extra;
6544 
6545 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6546 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6547 	}
6548 	if (hold_rlock == 1) {
6549 		SCTP_INP_READ_UNLOCK(inp);
6550 	}
6551 	if (hold_sblock) {
6552 		SOCKBUF_UNLOCK(&so->so_rcv);
6553 	}
6554 	if (sockbuf_lock) {
6555 		sbunlock(&so->so_rcv);
6556 	}
6557 
6558 	if (freecnt_applied) {
6559 		/*
6560 		 * The lock on the socket buffer protects us so the free
6561 		 * code will stop. But since we used the socketbuf lock and
6562 		 * the sender uses the tcb_lock to increment, we need to use
6563 		 * the atomic add to the refcnt.
6564 		 */
6565 		if (stcb == NULL) {
6566 #ifdef INVARIANTS
6567 			panic("stcb for refcnt has gone NULL?");
6568 			goto stage_left;
6569 #else
6570 			goto stage_left;
6571 #endif
6572 		}
6573 		/* Save the value back for next time */
6574 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6575 		atomic_add_int(&stcb->asoc.refcnt, -1);
6576 	}
6577 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6578 		if (stcb) {
6579 			sctp_misc_ints(SCTP_SORECV_DONE,
6580 			    freed_so_far,
6581 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6582 			    stcb->asoc.my_rwnd,
6583 			    so->so_rcv.sb_cc);
6584 		} else {
6585 			sctp_misc_ints(SCTP_SORECV_DONE,
6586 			    freed_so_far,
6587 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6588 			    0,
6589 			    so->so_rcv.sb_cc);
6590 		}
6591 	}
6592 stage_left:
6593 	if (wakeup_read_socket) {
6594 		sctp_sorwakeup(inp, so);
6595 	}
6596 	return (error);
6597 }
6598 
6599 
6600 #ifdef SCTP_MBUF_LOGGING
6601 struct mbuf *
6602 sctp_m_free(struct mbuf *m)
6603 {
6604 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6605 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6606 	}
6607 	return (m_free(m));
6608 }
6609 
6610 void
6611 sctp_m_freem(struct mbuf *mb)
6612 {
6613 	while (mb != NULL)
6614 		mb = sctp_m_free(mb);
6615 }
6616 
6617 #endif
6618 
6619 int
6620 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6621 {
6622 	/*
6623 	 * Given a local address. For all associations that holds the
6624 	 * address, request a peer-set-primary.
6625 	 */
6626 	struct sctp_ifa *ifa;
6627 	struct sctp_laddr *wi;
6628 
6629 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6630 	if (ifa == NULL) {
6631 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6632 		return (EADDRNOTAVAIL);
6633 	}
6634 	/*
6635 	 * Now that we have the ifa we must awaken the iterator with this
6636 	 * message.
6637 	 */
6638 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6639 	if (wi == NULL) {
6640 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6641 		return (ENOMEM);
6642 	}
6643 	/* Now incr the count and int wi structure */
6644 	SCTP_INCR_LADDR_COUNT();
6645 	memset(wi, 0, sizeof(*wi));
6646 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6647 	wi->ifa = ifa;
6648 	wi->action = SCTP_SET_PRIM_ADDR;
6649 	atomic_add_int(&ifa->refcount, 1);
6650 
6651 	/* Now add it to the work queue */
6652 	SCTP_WQ_ADDR_LOCK();
6653 	/*
6654 	 * Should this really be a tailq? As it is we will process the
6655 	 * newest first :-0
6656 	 */
6657 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6658 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6659 	    (struct sctp_inpcb *)NULL,
6660 	    (struct sctp_tcb *)NULL,
6661 	    (struct sctp_nets *)NULL);
6662 	SCTP_WQ_ADDR_UNLOCK();
6663 	return (0);
6664 }
6665 
6666 
6667 int
6668 sctp_soreceive(struct socket *so,
6669     struct sockaddr **psa,
6670     struct uio *uio,
6671     struct mbuf **mp0,
6672     struct mbuf **controlp,
6673     int *flagsp)
6674 {
6675 	int error, fromlen;
6676 	uint8_t sockbuf[256];
6677 	struct sockaddr *from;
6678 	struct sctp_extrcvinfo sinfo;
6679 	int filling_sinfo = 1;
6680 	int flags;
6681 	struct sctp_inpcb *inp;
6682 
6683 	inp = (struct sctp_inpcb *)so->so_pcb;
6684 	/* pickup the assoc we are reading from */
6685 	if (inp == NULL) {
6686 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6687 		return (EINVAL);
6688 	}
6689 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6690 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6691 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6692 	    (controlp == NULL)) {
6693 		/* user does not want the sndrcv ctl */
6694 		filling_sinfo = 0;
6695 	}
6696 	if (psa) {
6697 		from = (struct sockaddr *)sockbuf;
6698 		fromlen = sizeof(sockbuf);
6699 		from->sa_len = 0;
6700 	} else {
6701 		from = NULL;
6702 		fromlen = 0;
6703 	}
6704 
6705 	if (filling_sinfo) {
6706 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6707 	}
6708 	if (flagsp != NULL) {
6709 		flags = *flagsp;
6710 	} else {
6711 		flags = 0;
6712 	}
6713 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
6714 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6715 	if (flagsp != NULL) {
6716 		*flagsp = flags;
6717 	}
6718 	if (controlp != NULL) {
6719 		/* copy back the sinfo in a CMSG format */
6720 		if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
6721 			*controlp = sctp_build_ctl_nchunk(inp,
6722 			    (struct sctp_sndrcvinfo *)&sinfo);
6723 		} else {
6724 			*controlp = NULL;
6725 		}
6726 	}
6727 	if (psa) {
6728 		/* copy back the address info */
6729 		if (from && from->sa_len) {
6730 			*psa = sodupsockaddr(from, M_NOWAIT);
6731 		} else {
6732 			*psa = NULL;
6733 		}
6734 	}
6735 	return (error);
6736 }
6737 
6738 
6739 
6740 
6741 
6742 int
6743 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6744     int totaddr, int *error)
6745 {
6746 	int added = 0;
6747 	int i;
6748 	struct sctp_inpcb *inp;
6749 	struct sockaddr *sa;
6750 	size_t incr = 0;
6751 #ifdef INET
6752 	struct sockaddr_in *sin;
6753 #endif
6754 #ifdef INET6
6755 	struct sockaddr_in6 *sin6;
6756 #endif
6757 
6758 	sa = addr;
6759 	inp = stcb->sctp_ep;
6760 	*error = 0;
6761 	for (i = 0; i < totaddr; i++) {
6762 		switch (sa->sa_family) {
6763 #ifdef INET
6764 		case AF_INET:
6765 			incr = sizeof(struct sockaddr_in);
6766 			sin = (struct sockaddr_in *)sa;
6767 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6768 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6769 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6770 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6771 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6772 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6773 				*error = EINVAL;
6774 				goto out_now;
6775 			}
6776 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6777 			    SCTP_DONOT_SETSCOPE,
6778 			    SCTP_ADDR_IS_CONFIRMED)) {
6779 				/* assoc gone no un-lock */
6780 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6781 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6782 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6783 				*error = ENOBUFS;
6784 				goto out_now;
6785 			}
6786 			added++;
6787 			break;
6788 #endif
6789 #ifdef INET6
6790 		case AF_INET6:
6791 			incr = sizeof(struct sockaddr_in6);
6792 			sin6 = (struct sockaddr_in6 *)sa;
6793 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6794 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6795 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6796 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6797 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6798 				*error = EINVAL;
6799 				goto out_now;
6800 			}
6801 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6802 			    SCTP_DONOT_SETSCOPE,
6803 			    SCTP_ADDR_IS_CONFIRMED)) {
6804 				/* assoc gone no un-lock */
6805 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6806 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6807 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6808 				*error = ENOBUFS;
6809 				goto out_now;
6810 			}
6811 			added++;
6812 			break;
6813 #endif
6814 		default:
6815 			break;
6816 		}
6817 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6818 	}
6819 out_now:
6820 	return (added);
6821 }
6822 
6823 int
6824 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6825     unsigned int totaddr,
6826     unsigned int *num_v4, unsigned int *num_v6,
6827     unsigned int limit)
6828 {
6829 	struct sockaddr *sa;
6830 	struct sctp_tcb *stcb;
6831 	unsigned int incr, at, i;
6832 
6833 	at = 0;
6834 	sa = addr;
6835 	*num_v6 = *num_v4 = 0;
6836 	/* account and validate addresses */
6837 	if (totaddr == 0) {
6838 		return (EINVAL);
6839 	}
6840 	for (i = 0; i < totaddr; i++) {
6841 		if (at + sizeof(struct sockaddr) > limit) {
6842 			return (EINVAL);
6843 		}
6844 		switch (sa->sa_family) {
6845 #ifdef INET
6846 		case AF_INET:
6847 			incr = (unsigned int)sizeof(struct sockaddr_in);
6848 			if (sa->sa_len != incr) {
6849 				return (EINVAL);
6850 			}
6851 			(*num_v4) += 1;
6852 			break;
6853 #endif
6854 #ifdef INET6
6855 		case AF_INET6:
6856 			{
6857 				struct sockaddr_in6 *sin6;
6858 
6859 				sin6 = (struct sockaddr_in6 *)sa;
6860 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6861 					/* Must be non-mapped for connectx */
6862 					return (EINVAL);
6863 				}
6864 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6865 				if (sa->sa_len != incr) {
6866 					return (EINVAL);
6867 				}
6868 				(*num_v6) += 1;
6869 				break;
6870 			}
6871 #endif
6872 		default:
6873 			return (EINVAL);
6874 		}
6875 		if ((at + incr) > limit) {
6876 			return (EINVAL);
6877 		}
6878 		SCTP_INP_INCR_REF(inp);
6879 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6880 		if (stcb != NULL) {
6881 			SCTP_TCB_UNLOCK(stcb);
6882 			return (EALREADY);
6883 		} else {
6884 			SCTP_INP_DECR_REF(inp);
6885 		}
6886 		at += incr;
6887 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6888 	}
6889 	return (0);
6890 }
6891 
6892 /*
6893  * sctp_bindx(ADD) for one address.
6894  * assumes all arguments are valid/checked by caller.
6895  */
6896 void
6897 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6898     struct sockaddr *sa, sctp_assoc_t assoc_id,
6899     uint32_t vrf_id, int *error, void *p)
6900 {
6901 	struct sockaddr *addr_touse;
6902 #if defined(INET) && defined(INET6)
6903 	struct sockaddr_in sin;
6904 #endif
6905 
6906 	/* see if we're bound all already! */
6907 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6908 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6909 		*error = EINVAL;
6910 		return;
6911 	}
6912 	addr_touse = sa;
6913 #ifdef INET6
6914 	if (sa->sa_family == AF_INET6) {
6915 #ifdef INET
6916 		struct sockaddr_in6 *sin6;
6917 
6918 #endif
6919 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6920 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6921 			*error = EINVAL;
6922 			return;
6923 		}
6924 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6925 			/* can only bind v6 on PF_INET6 sockets */
6926 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6927 			*error = EINVAL;
6928 			return;
6929 		}
6930 #ifdef INET
6931 		sin6 = (struct sockaddr_in6 *)addr_touse;
6932 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6933 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6934 			    SCTP_IPV6_V6ONLY(inp)) {
6935 				/* can't bind v4-mapped on PF_INET sockets */
6936 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6937 				*error = EINVAL;
6938 				return;
6939 			}
6940 			in6_sin6_2_sin(&sin, sin6);
6941 			addr_touse = (struct sockaddr *)&sin;
6942 		}
6943 #endif
6944 	}
6945 #endif
6946 #ifdef INET
6947 	if (sa->sa_family == AF_INET) {
6948 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6949 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6950 			*error = EINVAL;
6951 			return;
6952 		}
6953 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6954 		    SCTP_IPV6_V6ONLY(inp)) {
6955 			/* can't bind v4 on PF_INET sockets */
6956 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6957 			*error = EINVAL;
6958 			return;
6959 		}
6960 	}
6961 #endif
6962 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6963 		if (p == NULL) {
6964 			/* Can't get proc for Net/Open BSD */
6965 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6966 			*error = EINVAL;
6967 			return;
6968 		}
6969 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6970 		return;
6971 	}
6972 	/*
6973 	 * No locks required here since bind and mgmt_ep_sa all do their own
6974 	 * locking. If we do something for the FIX: below we may need to
6975 	 * lock in that case.
6976 	 */
6977 	if (assoc_id == 0) {
6978 		/* add the address */
6979 		struct sctp_inpcb *lep;
6980 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6981 
6982 		/* validate the incoming port */
6983 		if ((lsin->sin_port != 0) &&
6984 		    (lsin->sin_port != inp->sctp_lport)) {
6985 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6986 			*error = EINVAL;
6987 			return;
6988 		} else {
6989 			/* user specified 0 port, set it to existing port */
6990 			lsin->sin_port = inp->sctp_lport;
6991 		}
6992 
6993 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6994 		if (lep != NULL) {
6995 			/*
6996 			 * We must decrement the refcount since we have the
6997 			 * ep already and are binding. No remove going on
6998 			 * here.
6999 			 */
7000 			SCTP_INP_DECR_REF(lep);
7001 		}
7002 		if (lep == inp) {
7003 			/* already bound to it.. ok */
7004 			return;
7005 		} else if (lep == NULL) {
7006 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
7007 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
7008 			    SCTP_ADD_IP_ADDRESS,
7009 			    vrf_id, NULL);
7010 		} else {
7011 			*error = EADDRINUSE;
7012 		}
7013 		if (*error)
7014 			return;
7015 	} else {
7016 		/*
7017 		 * FIX: decide whether we allow assoc based bindx
7018 		 */
7019 	}
7020 }
7021 
7022 /*
7023  * sctp_bindx(DELETE) for one address.
7024  * assumes all arguments are valid/checked by caller.
7025  */
7026 void
7027 sctp_bindx_delete_address(struct sctp_inpcb *inp,
7028     struct sockaddr *sa, sctp_assoc_t assoc_id,
7029     uint32_t vrf_id, int *error)
7030 {
7031 	struct sockaddr *addr_touse;
7032 #if defined(INET) && defined(INET6)
7033 	struct sockaddr_in sin;
7034 #endif
7035 
7036 	/* see if we're bound all already! */
7037 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7038 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7039 		*error = EINVAL;
7040 		return;
7041 	}
7042 	addr_touse = sa;
7043 #ifdef INET6
7044 	if (sa->sa_family == AF_INET6) {
7045 #ifdef INET
7046 		struct sockaddr_in6 *sin6;
7047 #endif
7048 
7049 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
7050 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7051 			*error = EINVAL;
7052 			return;
7053 		}
7054 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
7055 			/* can only bind v6 on PF_INET6 sockets */
7056 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7057 			*error = EINVAL;
7058 			return;
7059 		}
7060 #ifdef INET
7061 		sin6 = (struct sockaddr_in6 *)addr_touse;
7062 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
7063 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7064 			    SCTP_IPV6_V6ONLY(inp)) {
7065 				/* can't bind mapped-v4 on PF_INET sockets */
7066 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7067 				*error = EINVAL;
7068 				return;
7069 			}
7070 			in6_sin6_2_sin(&sin, sin6);
7071 			addr_touse = (struct sockaddr *)&sin;
7072 		}
7073 #endif
7074 	}
7075 #endif
7076 #ifdef INET
7077 	if (sa->sa_family == AF_INET) {
7078 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
7079 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7080 			*error = EINVAL;
7081 			return;
7082 		}
7083 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7084 		    SCTP_IPV6_V6ONLY(inp)) {
7085 			/* can't bind v4 on PF_INET sockets */
7086 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7087 			*error = EINVAL;
7088 			return;
7089 		}
7090 	}
7091 #endif
7092 	/*
7093 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
7094 	 * below is ever changed we may need to lock before calling
7095 	 * association level binding.
7096 	 */
7097 	if (assoc_id == 0) {
7098 		/* delete the address */
7099 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
7100 		    SCTP_DEL_IP_ADDRESS,
7101 		    vrf_id, NULL);
7102 	} else {
7103 		/*
7104 		 * FIX: decide whether we allow assoc based bindx
7105 		 */
7106 	}
7107 }
7108 
7109 /*
7110  * returns the valid local address count for an assoc, taking into account
7111  * all scoping rules
7112  */
7113 int
7114 sctp_local_addr_count(struct sctp_tcb *stcb)
7115 {
7116 	int loopback_scope;
7117 #if defined(INET)
7118 	int ipv4_local_scope, ipv4_addr_legal;
7119 #endif
7120 #if defined (INET6)
7121 	int local_scope, site_scope, ipv6_addr_legal;
7122 #endif
7123 	struct sctp_vrf *vrf;
7124 	struct sctp_ifn *sctp_ifn;
7125 	struct sctp_ifa *sctp_ifa;
7126 	int count = 0;
7127 
7128 	/* Turn on all the appropriate scopes */
7129 	loopback_scope = stcb->asoc.scope.loopback_scope;
7130 #if defined(INET)
7131 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
7132 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
7133 #endif
7134 #if defined(INET6)
7135 	local_scope = stcb->asoc.scope.local_scope;
7136 	site_scope = stcb->asoc.scope.site_scope;
7137 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
7138 #endif
7139 	SCTP_IPI_ADDR_RLOCK();
7140 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
7141 	if (vrf == NULL) {
7142 		/* no vrf, no addresses */
7143 		SCTP_IPI_ADDR_RUNLOCK();
7144 		return (0);
7145 	}
7146 
7147 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7148 		/*
7149 		 * bound all case: go through all ifns on the vrf
7150 		 */
7151 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
7152 			if ((loopback_scope == 0) &&
7153 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
7154 				continue;
7155 			}
7156 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
7157 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
7158 					continue;
7159 				switch (sctp_ifa->address.sa.sa_family) {
7160 #ifdef INET
7161 				case AF_INET:
7162 					if (ipv4_addr_legal) {
7163 						struct sockaddr_in *sin;
7164 
7165 						sin = &sctp_ifa->address.sin;
7166 						if (sin->sin_addr.s_addr == 0) {
7167 							/*
7168 							 * skip unspecified
7169 							 * addrs
7170 							 */
7171 							continue;
7172 						}
7173 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
7174 						    &sin->sin_addr) != 0) {
7175 							continue;
7176 						}
7177 						if ((ipv4_local_scope == 0) &&
7178 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
7179 							continue;
7180 						}
7181 						/* count this one */
7182 						count++;
7183 					} else {
7184 						continue;
7185 					}
7186 					break;
7187 #endif
7188 #ifdef INET6
7189 				case AF_INET6:
7190 					if (ipv6_addr_legal) {
7191 						struct sockaddr_in6 *sin6;
7192 
7193 						sin6 = &sctp_ifa->address.sin6;
7194 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
7195 							continue;
7196 						}
7197 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
7198 						    &sin6->sin6_addr) != 0) {
7199 							continue;
7200 						}
7201 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
7202 							if (local_scope == 0)
7203 								continue;
7204 							if (sin6->sin6_scope_id == 0) {
7205 								if (sa6_recoverscope(sin6) != 0)
7206 									/*
7207 									 *
7208 									 * bad
7209 									 * link
7210 									 *
7211 									 * local
7212 									 *
7213 									 * address
7214 									 */
7215 									continue;
7216 							}
7217 						}
7218 						if ((site_scope == 0) &&
7219 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
7220 							continue;
7221 						}
7222 						/* count this one */
7223 						count++;
7224 					}
7225 					break;
7226 #endif
7227 				default:
7228 					/* TSNH */
7229 					break;
7230 				}
7231 			}
7232 		}
7233 	} else {
7234 		/*
7235 		 * subset bound case
7236 		 */
7237 		struct sctp_laddr *laddr;
7238 
7239 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
7240 		    sctp_nxt_addr) {
7241 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
7242 				continue;
7243 			}
7244 			/* count this one */
7245 			count++;
7246 		}
7247 	}
7248 	SCTP_IPI_ADDR_RUNLOCK();
7249 	return (count);
7250 }
7251 
7252 #if defined(SCTP_LOCAL_TRACE_BUF)
7253 
7254 void
7255 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
7256 {
7257 	uint32_t saveindex, newindex;
7258 
7259 	do {
7260 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
7261 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7262 			newindex = 1;
7263 		} else {
7264 			newindex = saveindex + 1;
7265 		}
7266 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
7267 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7268 		saveindex = 0;
7269 	}
7270 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
7271 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
7272 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
7273 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
7274 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
7275 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
7276 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
7277 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
7278 }
7279 
7280 #endif
7281 static void
7282 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
7283     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
7284 {
7285 	struct ip *iph;
7286 #ifdef INET6
7287 	struct ip6_hdr *ip6;
7288 #endif
7289 	struct mbuf *sp, *last;
7290 	struct udphdr *uhdr;
7291 	uint16_t port;
7292 
7293 	if ((m->m_flags & M_PKTHDR) == 0) {
7294 		/* Can't handle one that is not a pkt hdr */
7295 		goto out;
7296 	}
7297 	/* Pull the src port */
7298 	iph = mtod(m, struct ip *);
7299 	uhdr = (struct udphdr *)((caddr_t)iph + off);
7300 	port = uhdr->uh_sport;
7301 	/*
7302 	 * Split out the mbuf chain. Leave the IP header in m, place the
7303 	 * rest in the sp.
7304 	 */
7305 	sp = m_split(m, off, M_NOWAIT);
7306 	if (sp == NULL) {
7307 		/* Gak, drop packet, we can't do a split */
7308 		goto out;
7309 	}
7310 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
7311 		/* Gak, packet can't have an SCTP header in it - too small */
7312 		m_freem(sp);
7313 		goto out;
7314 	}
7315 	/* Now pull up the UDP header and SCTP header together */
7316 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
7317 	if (sp == NULL) {
7318 		/* Gak pullup failed */
7319 		goto out;
7320 	}
7321 	/* Trim out the UDP header */
7322 	m_adj(sp, sizeof(struct udphdr));
7323 
7324 	/* Now reconstruct the mbuf chain */
7325 	for (last = m; last->m_next; last = last->m_next);
7326 	last->m_next = sp;
7327 	m->m_pkthdr.len += sp->m_pkthdr.len;
7328 	/*
7329 	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
7330 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
7331 	 * CSUM_SCTP_VALID this would imply that the HW also verified the
7332 	 * SCTP checksum. Therefore, clear the bit.
7333 	 */
7334 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
7335 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
7336 	    m->m_pkthdr.len,
7337 	    if_name(m->m_pkthdr.rcvif),
7338 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
7339 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
7340 	iph = mtod(m, struct ip *);
7341 	switch (iph->ip_v) {
7342 #ifdef INET
7343 	case IPVERSION:
7344 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
7345 		sctp_input_with_port(m, off, port);
7346 		break;
7347 #endif
7348 #ifdef INET6
7349 	case IPV6_VERSION >> 4:
7350 		ip6 = mtod(m, struct ip6_hdr *);
7351 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
7352 		sctp6_input_with_port(&m, &off, port);
7353 		break;
7354 #endif
7355 	default:
7356 		goto out;
7357 		break;
7358 	}
7359 	return;
7360 out:
7361 	m_freem(m);
7362 }
7363 
7364 #ifdef INET
7365 static void
7366 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
7367 {
7368 	struct ip *outer_ip, *inner_ip;
7369 	struct sctphdr *sh;
7370 	struct icmp *icmp;
7371 	struct udphdr *udp;
7372 	struct sctp_inpcb *inp;
7373 	struct sctp_tcb *stcb;
7374 	struct sctp_nets *net;
7375 	struct sctp_init_chunk *ch;
7376 	struct sockaddr_in src, dst;
7377 	uint8_t type, code;
7378 
7379 	inner_ip = (struct ip *)vip;
7380 	icmp = (struct icmp *)((caddr_t)inner_ip -
7381 	    (sizeof(struct icmp) - sizeof(struct ip)));
7382 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
7383 	if (ntohs(outer_ip->ip_len) <
7384 	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
7385 		return;
7386 	}
7387 	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
7388 	sh = (struct sctphdr *)(udp + 1);
7389 	memset(&src, 0, sizeof(struct sockaddr_in));
7390 	src.sin_family = AF_INET;
7391 	src.sin_len = sizeof(struct sockaddr_in);
7392 	src.sin_port = sh->src_port;
7393 	src.sin_addr = inner_ip->ip_src;
7394 	memset(&dst, 0, sizeof(struct sockaddr_in));
7395 	dst.sin_family = AF_INET;
7396 	dst.sin_len = sizeof(struct sockaddr_in);
7397 	dst.sin_port = sh->dest_port;
7398 	dst.sin_addr = inner_ip->ip_dst;
7399 	/*
7400 	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
7401 	 * holds our local endpoint address. Thus we reverse the dst and the
7402 	 * src in the lookup.
7403 	 */
7404 	inp = NULL;
7405 	net = NULL;
7406 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7407 	    (struct sockaddr *)&src,
7408 	    &inp, &net, 1,
7409 	    SCTP_DEFAULT_VRFID);
7410 	if ((stcb != NULL) &&
7411 	    (net != NULL) &&
7412 	    (inp != NULL)) {
7413 		/* Check the UDP port numbers */
7414 		if ((udp->uh_dport != net->port) ||
7415 		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7416 			SCTP_TCB_UNLOCK(stcb);
7417 			return;
7418 		}
7419 		/* Check the verification tag */
7420 		if (ntohl(sh->v_tag) != 0) {
7421 			/*
7422 			 * This must be the verification tag used for
7423 			 * sending out packets. We don't consider packets
7424 			 * reflecting the verification tag.
7425 			 */
7426 			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
7427 				SCTP_TCB_UNLOCK(stcb);
7428 				return;
7429 			}
7430 		} else {
7431 			if (ntohs(outer_ip->ip_len) >=
7432 			    sizeof(struct ip) +
7433 			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
7434 				/*
7435 				 * In this case we can check if we got an
7436 				 * INIT chunk and if the initiate tag
7437 				 * matches.
7438 				 */
7439 				ch = (struct sctp_init_chunk *)(sh + 1);
7440 				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
7441 				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
7442 					SCTP_TCB_UNLOCK(stcb);
7443 					return;
7444 				}
7445 			} else {
7446 				SCTP_TCB_UNLOCK(stcb);
7447 				return;
7448 			}
7449 		}
7450 		type = icmp->icmp_type;
7451 		code = icmp->icmp_code;
7452 		if ((type == ICMP_UNREACH) &&
7453 		    (code == ICMP_UNREACH_PORT)) {
7454 			code = ICMP_UNREACH_PROTOCOL;
7455 		}
7456 		sctp_notify(inp, stcb, net, type, code,
7457 		    ntohs(inner_ip->ip_len),
7458 		    (uint32_t)ntohs(icmp->icmp_nextmtu));
7459 	} else {
7460 		if ((stcb == NULL) && (inp != NULL)) {
7461 			/* reduce ref-count */
7462 			SCTP_INP_WLOCK(inp);
7463 			SCTP_INP_DECR_REF(inp);
7464 			SCTP_INP_WUNLOCK(inp);
7465 		}
7466 		if (stcb) {
7467 			SCTP_TCB_UNLOCK(stcb);
7468 		}
7469 	}
7470 	return;
7471 }
7472 #endif
7473 
7474 #ifdef INET6
7475 static void
7476 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
7477 {
7478 	struct ip6ctlparam *ip6cp;
7479 	struct sctp_inpcb *inp;
7480 	struct sctp_tcb *stcb;
7481 	struct sctp_nets *net;
7482 	struct sctphdr sh;
7483 	struct udphdr udp;
7484 	struct sockaddr_in6 src, dst;
7485 	uint8_t type, code;
7486 
7487 	ip6cp = (struct ip6ctlparam *)d;
7488 	/*
7489 	 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
7490 	 */
7491 	if (ip6cp->ip6c_m == NULL) {
7492 		return;
7493 	}
7494 	/*
7495 	 * Check if we can safely examine the ports and the verification tag
7496 	 * of the SCTP common header.
7497 	 */
7498 	if (ip6cp->ip6c_m->m_pkthdr.len <
7499 	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
7500 		return;
7501 	}
7502 	/* Copy out the UDP header. */
7503 	memset(&udp, 0, sizeof(struct udphdr));
7504 	m_copydata(ip6cp->ip6c_m,
7505 	    ip6cp->ip6c_off,
7506 	    sizeof(struct udphdr),
7507 	    (caddr_t)&udp);
7508 	/* Copy out the port numbers and the verification tag. */
7509 	memset(&sh, 0, sizeof(struct sctphdr));
7510 	m_copydata(ip6cp->ip6c_m,
7511 	    ip6cp->ip6c_off + sizeof(struct udphdr),
7512 	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
7513 	    (caddr_t)&sh);
7514 	memset(&src, 0, sizeof(struct sockaddr_in6));
7515 	src.sin6_family = AF_INET6;
7516 	src.sin6_len = sizeof(struct sockaddr_in6);
7517 	src.sin6_port = sh.src_port;
7518 	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
7519 	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7520 		return;
7521 	}
7522 	memset(&dst, 0, sizeof(struct sockaddr_in6));
7523 	dst.sin6_family = AF_INET6;
7524 	dst.sin6_len = sizeof(struct sockaddr_in6);
7525 	dst.sin6_port = sh.dest_port;
7526 	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7527 	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7528 		return;
7529 	}
7530 	inp = NULL;
7531 	net = NULL;
7532 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7533 	    (struct sockaddr *)&src,
7534 	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7535 	if ((stcb != NULL) &&
7536 	    (net != NULL) &&
7537 	    (inp != NULL)) {
7538 		/* Check the UDP port numbers */
7539 		if ((udp.uh_dport != net->port) ||
7540 		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7541 			SCTP_TCB_UNLOCK(stcb);
7542 			return;
7543 		}
7544 		/* Check the verification tag */
7545 		if (ntohl(sh.v_tag) != 0) {
7546 			/*
7547 			 * This must be the verification tag used for
7548 			 * sending out packets. We don't consider packets
7549 			 * reflecting the verification tag.
7550 			 */
7551 			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7552 				SCTP_TCB_UNLOCK(stcb);
7553 				return;
7554 			}
7555 		} else {
7556 			if (ip6cp->ip6c_m->m_pkthdr.len >=
7557 			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7558 			    sizeof(struct sctphdr) +
7559 			    sizeof(struct sctp_chunkhdr) +
7560 			    offsetof(struct sctp_init, a_rwnd)) {
7561 				/*
7562 				 * In this case we can check if we got an
7563 				 * INIT chunk and if the initiate tag
7564 				 * matches.
7565 				 */
7566 				uint32_t initiate_tag;
7567 				uint8_t chunk_type;
7568 
7569 				m_copydata(ip6cp->ip6c_m,
7570 				    ip6cp->ip6c_off +
7571 				    sizeof(struct udphdr) +
7572 				    sizeof(struct sctphdr),
7573 				    sizeof(uint8_t),
7574 				    (caddr_t)&chunk_type);
7575 				m_copydata(ip6cp->ip6c_m,
7576 				    ip6cp->ip6c_off +
7577 				    sizeof(struct udphdr) +
7578 				    sizeof(struct sctphdr) +
7579 				    sizeof(struct sctp_chunkhdr),
7580 				    sizeof(uint32_t),
7581 				    (caddr_t)&initiate_tag);
7582 				if ((chunk_type != SCTP_INITIATION) ||
7583 				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7584 					SCTP_TCB_UNLOCK(stcb);
7585 					return;
7586 				}
7587 			} else {
7588 				SCTP_TCB_UNLOCK(stcb);
7589 				return;
7590 			}
7591 		}
7592 		type = ip6cp->ip6c_icmp6->icmp6_type;
7593 		code = ip6cp->ip6c_icmp6->icmp6_code;
7594 		if ((type == ICMP6_DST_UNREACH) &&
7595 		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7596 			type = ICMP6_PARAM_PROB;
7597 			code = ICMP6_PARAMPROB_NEXTHEADER;
7598 		}
7599 		sctp6_notify(inp, stcb, net, type, code,
7600 		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7601 	} else {
7602 		if ((stcb == NULL) && (inp != NULL)) {
7603 			/* reduce inp's ref-count */
7604 			SCTP_INP_WLOCK(inp);
7605 			SCTP_INP_DECR_REF(inp);
7606 			SCTP_INP_WUNLOCK(inp);
7607 		}
7608 		if (stcb) {
7609 			SCTP_TCB_UNLOCK(stcb);
7610 		}
7611 	}
7612 }
7613 #endif
7614 
7615 void
7616 sctp_over_udp_stop(void)
7617 {
7618 	/*
7619 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7620 	 * for writting!
7621 	 */
7622 #ifdef INET
7623 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7624 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7625 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7626 	}
7627 #endif
7628 #ifdef INET6
7629 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7630 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7631 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7632 	}
7633 #endif
7634 }
7635 
7636 int
7637 sctp_over_udp_start(void)
7638 {
7639 	uint16_t port;
7640 	int ret;
7641 #ifdef INET
7642 	struct sockaddr_in sin;
7643 #endif
7644 #ifdef INET6
7645 	struct sockaddr_in6 sin6;
7646 #endif
7647 	/*
7648 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7649 	 * for writting!
7650 	 */
7651 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7652 	if (ntohs(port) == 0) {
7653 		/* Must have a port set */
7654 		return (EINVAL);
7655 	}
7656 #ifdef INET
7657 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7658 		/* Already running -- must stop first */
7659 		return (EALREADY);
7660 	}
7661 #endif
7662 #ifdef INET6
7663 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7664 		/* Already running -- must stop first */
7665 		return (EALREADY);
7666 	}
7667 #endif
7668 #ifdef INET
7669 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7670 	    SOCK_DGRAM, IPPROTO_UDP,
7671 	    curthread->td_ucred, curthread))) {
7672 		sctp_over_udp_stop();
7673 		return (ret);
7674 	}
7675 	/* Call the special UDP hook. */
7676 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7677 	    sctp_recv_udp_tunneled_packet,
7678 	    sctp_recv_icmp_tunneled_packet,
7679 	    NULL))) {
7680 		sctp_over_udp_stop();
7681 		return (ret);
7682 	}
7683 	/* Ok, we have a socket, bind it to the port. */
7684 	memset(&sin, 0, sizeof(struct sockaddr_in));
7685 	sin.sin_len = sizeof(struct sockaddr_in);
7686 	sin.sin_family = AF_INET;
7687 	sin.sin_port = htons(port);
7688 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7689 	    (struct sockaddr *)&sin, curthread))) {
7690 		sctp_over_udp_stop();
7691 		return (ret);
7692 	}
7693 #endif
7694 #ifdef INET6
7695 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7696 	    SOCK_DGRAM, IPPROTO_UDP,
7697 	    curthread->td_ucred, curthread))) {
7698 		sctp_over_udp_stop();
7699 		return (ret);
7700 	}
7701 	/* Call the special UDP hook. */
7702 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7703 	    sctp_recv_udp_tunneled_packet,
7704 	    sctp_recv_icmp6_tunneled_packet,
7705 	    NULL))) {
7706 		sctp_over_udp_stop();
7707 		return (ret);
7708 	}
7709 	/* Ok, we have a socket, bind it to the port. */
7710 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7711 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7712 	sin6.sin6_family = AF_INET6;
7713 	sin6.sin6_port = htons(port);
7714 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7715 	    (struct sockaddr *)&sin6, curthread))) {
7716 		sctp_over_udp_stop();
7717 		return (ret);
7718 	}
7719 #endif
7720 	return (0);
7721 }
7722 
7723 /*
7724  * sctp_min_mtu ()returns the minimum of all non-zero arguments.
7725  * If all arguments are zero, zero is returned.
7726  */
7727 uint32_t
7728 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7729 {
7730 	if (mtu1 > 0) {
7731 		if (mtu2 > 0) {
7732 			if (mtu3 > 0) {
7733 				return (min(mtu1, min(mtu2, mtu3)));
7734 			} else {
7735 				return (min(mtu1, mtu2));
7736 			}
7737 		} else {
7738 			if (mtu3 > 0) {
7739 				return (min(mtu1, mtu3));
7740 			} else {
7741 				return (mtu1);
7742 			}
7743 		}
7744 	} else {
7745 		if (mtu2 > 0) {
7746 			if (mtu3 > 0) {
7747 				return (min(mtu2, mtu3));
7748 			} else {
7749 				return (mtu2);
7750 			}
7751 		} else {
7752 			return (mtu3);
7753 		}
7754 	}
7755 }
7756 
7757 void
7758 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7759 {
7760 	struct in_conninfo inc;
7761 
7762 	memset(&inc, 0, sizeof(struct in_conninfo));
7763 	inc.inc_fibnum = fibnum;
7764 	switch (addr->sa.sa_family) {
7765 #ifdef INET
7766 	case AF_INET:
7767 		inc.inc_faddr = addr->sin.sin_addr;
7768 		break;
7769 #endif
7770 #ifdef INET6
7771 	case AF_INET6:
7772 		inc.inc_flags |= INC_ISIPV6;
7773 		inc.inc6_faddr = addr->sin6.sin6_addr;
7774 		break;
7775 #endif
7776 	default:
7777 		return;
7778 	}
7779 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7780 }
7781 
7782 uint32_t
7783 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7784 {
7785 	struct in_conninfo inc;
7786 
7787 	memset(&inc, 0, sizeof(struct in_conninfo));
7788 	inc.inc_fibnum = fibnum;
7789 	switch (addr->sa.sa_family) {
7790 #ifdef INET
7791 	case AF_INET:
7792 		inc.inc_faddr = addr->sin.sin_addr;
7793 		break;
7794 #endif
7795 #ifdef INET6
7796 	case AF_INET6:
7797 		inc.inc_flags |= INC_ISIPV6;
7798 		inc.inc6_faddr = addr->sin6.sin6_addr;
7799 		break;
7800 #endif
7801 	default:
7802 		return (0);
7803 	}
7804 	return ((uint32_t)tcp_hc_getmtu(&inc));
7805 }
7806 
7807 void
7808 sctp_set_state(struct sctp_tcb *stcb, int new_state)
7809 {
7810 #if defined(KDTRACE_HOOKS)
7811 	int old_state = stcb->asoc.state;
7812 #endif
7813 
7814 	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
7815 	    ("sctp_set_state: Can't set substate (new_state = %x)",
7816 	    new_state));
7817 	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
7818 	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
7819 	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
7820 	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7821 		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
7822 	}
7823 #if defined(KDTRACE_HOOKS)
7824 	if (((old_state & SCTP_STATE_MASK) != new_state) &&
7825 	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
7826 	    (new_state == SCTP_STATE_INUSE))) {
7827 		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
7828 	}
7829 #endif
7830 }
7831 
7832 void
7833 sctp_add_substate(struct sctp_tcb *stcb, int substate)
7834 {
7835 #if defined(KDTRACE_HOOKS)
7836 	int old_state = stcb->asoc.state;
7837 #endif
7838 
7839 	KASSERT((substate & SCTP_STATE_MASK) == 0,
7840 	    ("sctp_add_substate: Can't set state (substate = %x)",
7841 	    substate));
7842 	stcb->asoc.state |= substate;
7843 #if defined(KDTRACE_HOOKS)
7844 	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
7845 	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
7846 	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
7847 	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
7848 		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
7849 	}
7850 #endif
7851 }
7852