xref: /freebsd/sys/netinet/sctputil.c (revision e40139ff33b48b56a24c808b166b04b8ee6f5b21)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #include <netinet6/sctp6_var.h>
45 #endif
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_output.h>
48 #include <netinet/sctp_uio.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_auth.h>
52 #include <netinet/sctp_asconf.h>
53 #include <netinet/sctp_bsd_addr.h>
54 #include <netinet/sctp_kdtrace.h>
55 #if defined(INET6) || defined(INET)
56 #include <netinet/tcp_var.h>
57 #endif
58 #include <netinet/udp.h>
59 #include <netinet/udp_var.h>
60 #include <sys/proc.h>
61 #ifdef INET6
62 #include <netinet/icmp6.h>
63 #endif
64 
65 
66 #ifndef KTR_SCTP
67 #define KTR_SCTP KTR_SUBSYS
68 #endif
69 
70 extern const struct sctp_cc_functions sctp_cc_functions[];
71 extern const struct sctp_ss_functions sctp_ss_functions[];
72 
/*
 * Log a socket-buffer accounting event (sb_cc change of 'incr' bytes) to
 * the local trace buffer.  No-op unless SCTP_LOCAL_TRACE_BUF is configured.
 */
void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Zero the entry first: the trace macro below reads the whole union
	 * through x.misc.log1..log4, so any byte not explicitly written here
	 * would otherwise leak uninitialized stack data into the trace.
	 * This matches rto_logging() and sctp_log_map().
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
95 
96 void
97 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
98 {
99 #if defined(SCTP_LOCAL_TRACE_BUF)
100 	struct sctp_cwnd_log sctp_clog;
101 
102 	sctp_clog.x.close.inp = (void *)inp;
103 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
104 	if (stcb) {
105 		sctp_clog.x.close.stcb = (void *)stcb;
106 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
107 	} else {
108 		sctp_clog.x.close.stcb = 0;
109 		sctp_clog.x.close.state = 0;
110 	}
111 	sctp_clog.x.close.loc = loc;
112 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
113 	    SCTP_LOG_EVENT_CLOSE,
114 	    0,
115 	    sctp_clog.x.misc.log1,
116 	    sctp_clog.x.misc.log2,
117 	    sctp_clog.x.misc.log3,
118 	    sctp_clog.x.misc.log4);
119 #endif
120 }
121 
/*
 * Log an RTT measurement event for 'net' to the local trace buffer.
 * No-op unless SCTP_LOCAL_TRACE_BUF is configured.
 */
void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first; the trace macro reads the whole union via x.misc.*. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	/* Scale down by 1000 to fit; presumably usec -> msec — confirm. */
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
140 
141 void
142 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
143 {
144 #if defined(SCTP_LOCAL_TRACE_BUF)
145 	struct sctp_cwnd_log sctp_clog;
146 
147 	sctp_clog.x.strlog.stcb = stcb;
148 	sctp_clog.x.strlog.n_tsn = tsn;
149 	sctp_clog.x.strlog.n_sseq = sseq;
150 	sctp_clog.x.strlog.e_tsn = 0;
151 	sctp_clog.x.strlog.e_sseq = 0;
152 	sctp_clog.x.strlog.strm = stream;
153 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
154 	    SCTP_LOG_EVENT_STRM,
155 	    from,
156 	    sctp_clog.x.misc.log1,
157 	    sctp_clog.x.misc.log2,
158 	    sctp_clog.x.misc.log3,
159 	    sctp_clog.x.misc.log4);
160 #endif
161 }
162 
/* Log a Nagle-algorithm decision ('action') with current queue/flight state. */
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Zero first: the trace macro reads the whole union via x.misc.*,
	 * so unwritten bytes would otherwise be uninitialized stack data.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
183 
184 void
185 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
186 {
187 #if defined(SCTP_LOCAL_TRACE_BUF)
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	sctp_clog.x.sack.cumack = cumack;
191 	sctp_clog.x.sack.oldcumack = old_cumack;
192 	sctp_clog.x.sack.tsn = tsn;
193 	sctp_clog.x.sack.numGaps = gaps;
194 	sctp_clog.x.sack.numDups = dups;
195 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
196 	    SCTP_LOG_EVENT_SACK,
197 	    from,
198 	    sctp_clog.x.misc.log1,
199 	    sctp_clog.x.misc.log2,
200 	    sctp_clog.x.misc.log3,
201 	    sctp_clog.x.misc.log4);
202 #endif
203 }
204 
/*
 * Log the state of the TSN mapping array: base TSN, cumulative-ack point,
 * and highest TSN seen.  No-op unless SCTP_LOCAL_TRACE_BUF is configured.
 */
void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first; the trace macro reads the whole union via x.misc.*. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
224 
/*
 * Log a fast-retransmit event: the largest TSN acked, largest newly-acked
 * TSN, and the TSN being considered.  No-op without SCTP_LOCAL_TRACE_BUF.
 */
void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first; the trace macro reads the whole union via x.misc.*. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
244 
245 #ifdef SCTP_MBUF_LOGGING
/* Log a single mbuf's flags, length, data pointer and ext-buffer state. */
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Zero first: the trace macro reads the whole union via x.misc.*,
	 * so unwritten bytes would otherwise be uninitialized stack data.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
272 
273 void
274 sctp_log_mbc(struct mbuf *m, int from)
275 {
276 	struct mbuf *mat;
277 
278 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
279 		sctp_log_mb(mat, from);
280 	}
281 }
282 #endif
283 
/*
 * Log a stream delivery event for read-queue entry 'control'; 'poschk' is
 * an optional second entry whose TSN/MID are logged alongside.
 */
void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	/*
	 * Zero first: the trace macro reads the whole union via x.misc.*,
	 * so unwritten bytes would otherwise be uninitialized stack data.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
314 
315 void
316 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
317 {
318 #if defined(SCTP_LOCAL_TRACE_BUF)
319 	struct sctp_cwnd_log sctp_clog;
320 
321 	sctp_clog.x.cwnd.net = net;
322 	if (stcb->asoc.send_queue_cnt > 255)
323 		sctp_clog.x.cwnd.cnt_in_send = 255;
324 	else
325 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
326 	if (stcb->asoc.stream_queue_cnt > 255)
327 		sctp_clog.x.cwnd.cnt_in_str = 255;
328 	else
329 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
330 
331 	if (net) {
332 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
333 		sctp_clog.x.cwnd.inflight = net->flight_size;
334 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
335 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
336 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
337 	}
338 	if (SCTP_CWNDLOG_PRESEND == from) {
339 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
340 	}
341 	sctp_clog.x.cwnd.cwnd_augment = augment;
342 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
343 	    SCTP_LOG_EVENT_CWND,
344 	    from,
345 	    sctp_clog.x.misc.log1,
346 	    sctp_clog.x.misc.log2,
347 	    sctp_clog.x.misc.log3,
348 	    sctp_clog.x.misc.log4);
349 #endif
350 }
351 
/*
 * Snapshot the ownership state of every lock relevant to (inp, stcb) and
 * emit it to the local trace buffer.  Either pointer may be NULL, in which
 * case the corresponding fields are logged as SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx — looks like a copy/paste; confirm whether
		 * sock_lock should sample the socket lock instead.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
397 
/*
 * Log a max-burst limit event with the current error code, burst value,
 * and queue/flight state.  NOTE(review): net is dereferenced without a
 * NULL check (unlike sctp_log_cwnd) — callers presumably guarantee a
 * non-NULL net; confirm.
 */
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	/* Queue counts are clamped to 255 to fit the 8-bit log fields. */
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
426 
427 void
428 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
429 {
430 #if defined(SCTP_LOCAL_TRACE_BUF)
431 	struct sctp_cwnd_log sctp_clog;
432 
433 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
434 	sctp_clog.x.rwnd.send_size = snd_size;
435 	sctp_clog.x.rwnd.overhead = overhead;
436 	sctp_clog.x.rwnd.new_rwnd = 0;
437 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
438 	    SCTP_LOG_EVENT_RWND,
439 	    from,
440 	    sctp_clog.x.misc.log1,
441 	    sctp_clog.x.misc.log2,
442 	    sctp_clog.x.misc.log3,
443 	    sctp_clog.x.misc.log4);
444 #endif
445 }
446 
447 void
448 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
449 {
450 #if defined(SCTP_LOCAL_TRACE_BUF)
451 	struct sctp_cwnd_log sctp_clog;
452 
453 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
454 	sctp_clog.x.rwnd.send_size = flight_size;
455 	sctp_clog.x.rwnd.overhead = overhead;
456 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
457 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
458 	    SCTP_LOG_EVENT_RWND,
459 	    from,
460 	    sctp_clog.x.misc.log1,
461 	    sctp_clog.x.misc.log2,
462 	    sctp_clog.x.misc.log3,
463 	    sctp_clog.x.misc.log4);
464 #endif
465 }
466 
467 #ifdef SCTP_MBCNT_LOGGING
468 static void
469 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
470 {
471 #if defined(SCTP_LOCAL_TRACE_BUF)
472 	struct sctp_cwnd_log sctp_clog;
473 
474 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
475 	sctp_clog.x.mbcnt.size_change = book;
476 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
477 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
478 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
479 	    SCTP_LOG_EVENT_MBCNT,
480 	    from,
481 	    sctp_clog.x.misc.log1,
482 	    sctp_clog.x.misc.log2,
483 	    sctp_clog.x.misc.log3,
484 	    sctp_clog.x.misc.log4);
485 #endif
486 }
487 #endif
488 
/*
 * Log four caller-supplied 32-bit values as a generic "misc" trace event.
 * No-op unless SCTP_LOCAL_TRACE_BUF is configured.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}
499 
500 void
501 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
502 {
503 #if defined(SCTP_LOCAL_TRACE_BUF)
504 	struct sctp_cwnd_log sctp_clog;
505 
506 	sctp_clog.x.wake.stcb = (void *)stcb;
507 	sctp_clog.x.wake.wake_cnt = wake_cnt;
508 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
509 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
510 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
511 
512 	if (stcb->asoc.stream_queue_cnt < 0xff)
513 		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
514 	else
515 		sctp_clog.x.wake.stream_qcnt = 0xff;
516 
517 	if (stcb->asoc.chunks_on_out_queue < 0xff)
518 		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
519 	else
520 		sctp_clog.x.wake.chunks_on_oque = 0xff;
521 
522 	sctp_clog.x.wake.sctpflags = 0;
523 	/* set in the defered mode stuff */
524 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
525 		sctp_clog.x.wake.sctpflags |= 1;
526 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
527 		sctp_clog.x.wake.sctpflags |= 2;
528 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
529 		sctp_clog.x.wake.sctpflags |= 4;
530 	/* what about the sb */
531 	if (stcb->sctp_socket) {
532 		struct socket *so = stcb->sctp_socket;
533 
534 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
535 	} else {
536 		sctp_clog.x.wake.sbflags = 0xff;
537 	}
538 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
539 	    SCTP_LOG_EVENT_WAKE,
540 	    from,
541 	    sctp_clog.x.misc.log1,
542 	    sctp_clog.x.misc.log2,
543 	    sctp_clog.x.misc.log3,
544 	    sctp_clog.x.misc.log4);
545 #endif
546 }
547 
548 void
549 sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
550 {
551 #if defined(SCTP_LOCAL_TRACE_BUF)
552 	struct sctp_cwnd_log sctp_clog;
553 
554 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
555 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
556 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
557 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
558 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
559 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
560 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
561 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
562 	    SCTP_LOG_EVENT_BLOCK,
563 	    from,
564 	    sctp_clog.x.misc.log1,
565 	    sctp_clog.x.misc.log2,
566 	    sctp_clog.x.misc.log3,
567 	    sctp_clog.x.misc.log4);
568 #endif
569 }
570 
/*
 * Placeholder for exporting the trace log via a socket option; trace data
 * is retrieved with ktrdump instead, so this always succeeds with no data.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
577 
578 #ifdef SCTP_AUDITING_ENABLED
/* Ring buffer of (event, detail) byte pairs recorded by the audit code. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next write position in the ring; wraps at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
581 
582 static
583 void
584 sctp_print_audit_report(void)
585 {
586 	int i;
587 	int cnt;
588 
589 	cnt = 0;
590 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
591 		if ((sctp_audit_data[i][0] == 0xe0) &&
592 		    (sctp_audit_data[i][1] == 0x01)) {
593 			cnt = 0;
594 			SCTP_PRINTF("\n");
595 		} else if (sctp_audit_data[i][0] == 0xf0) {
596 			cnt = 0;
597 			SCTP_PRINTF("\n");
598 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
599 		    (sctp_audit_data[i][1] == 0x01)) {
600 			SCTP_PRINTF("\n");
601 			cnt = 0;
602 		}
603 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
604 		    (uint32_t)sctp_audit_data[i][1]);
605 		cnt++;
606 		if ((cnt % 14) == 0)
607 			SCTP_PRINTF("\n");
608 	}
609 	for (i = 0; i < sctp_audit_indx; i++) {
610 		if ((sctp_audit_data[i][0] == 0xe0) &&
611 		    (sctp_audit_data[i][1] == 0x01)) {
612 			cnt = 0;
613 			SCTP_PRINTF("\n");
614 		} else if (sctp_audit_data[i][0] == 0xf0) {
615 			cnt = 0;
616 			SCTP_PRINTF("\n");
617 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
618 		    (sctp_audit_data[i][1] == 0x01)) {
619 			SCTP_PRINTF("\n");
620 			cnt = 0;
621 		}
622 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
623 		    (uint32_t)sctp_audit_data[i][1]);
624 		cnt++;
625 		if ((cnt % 14) == 0)
626 			SCTP_PRINTF("\n");
627 	}
628 	SCTP_PRINTF("\n");
629 }
630 
631 void
632 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
633     struct sctp_nets *net)
634 {
635 	int resend_cnt, tot_out, rep, tot_book_cnt;
636 	struct sctp_nets *lnet;
637 	struct sctp_tmit_chunk *chk;
638 
639 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
640 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
641 	sctp_audit_indx++;
642 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
643 		sctp_audit_indx = 0;
644 	}
645 	if (inp == NULL) {
646 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
647 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
648 		sctp_audit_indx++;
649 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
650 			sctp_audit_indx = 0;
651 		}
652 		return;
653 	}
654 	if (stcb == NULL) {
655 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
656 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
657 		sctp_audit_indx++;
658 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
659 			sctp_audit_indx = 0;
660 		}
661 		return;
662 	}
663 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
664 	sctp_audit_data[sctp_audit_indx][1] =
665 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
666 	sctp_audit_indx++;
667 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
668 		sctp_audit_indx = 0;
669 	}
670 	rep = 0;
671 	tot_book_cnt = 0;
672 	resend_cnt = tot_out = 0;
673 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
674 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
675 			resend_cnt++;
676 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
677 			tot_out += chk->book_size;
678 			tot_book_cnt++;
679 		}
680 	}
681 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
682 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
683 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
684 		sctp_audit_indx++;
685 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
686 			sctp_audit_indx = 0;
687 		}
688 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
689 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
690 		rep = 1;
691 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
692 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
693 		sctp_audit_data[sctp_audit_indx][1] =
694 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
695 		sctp_audit_indx++;
696 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
697 			sctp_audit_indx = 0;
698 		}
699 	}
700 	if (tot_out != stcb->asoc.total_flight) {
701 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
702 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
703 		sctp_audit_indx++;
704 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
705 			sctp_audit_indx = 0;
706 		}
707 		rep = 1;
708 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
709 		    (int)stcb->asoc.total_flight);
710 		stcb->asoc.total_flight = tot_out;
711 	}
712 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
713 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
714 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
715 		sctp_audit_indx++;
716 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
717 			sctp_audit_indx = 0;
718 		}
719 		rep = 1;
720 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
721 
722 		stcb->asoc.total_flight_count = tot_book_cnt;
723 	}
724 	tot_out = 0;
725 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
726 		tot_out += lnet->flight_size;
727 	}
728 	if (tot_out != stcb->asoc.total_flight) {
729 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
730 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
731 		sctp_audit_indx++;
732 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
733 			sctp_audit_indx = 0;
734 		}
735 		rep = 1;
736 		SCTP_PRINTF("real flight:%d net total was %d\n",
737 		    stcb->asoc.total_flight, tot_out);
738 		/* now corrective action */
739 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
740 
741 			tot_out = 0;
742 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
743 				if ((chk->whoTo == lnet) &&
744 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
745 					tot_out += chk->book_size;
746 				}
747 			}
748 			if (lnet->flight_size != tot_out) {
749 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
750 				    (void *)lnet, lnet->flight_size,
751 				    tot_out);
752 				lnet->flight_size = tot_out;
753 			}
754 		}
755 	}
756 	if (rep) {
757 		sctp_print_audit_report();
758 	}
759 }
760 
761 void
762 sctp_audit_log(uint8_t ev, uint8_t fd)
763 {
764 
765 	sctp_audit_data[sctp_audit_indx][0] = ev;
766 	sctp_audit_data[sctp_audit_indx][1] = fd;
767 	sctp_audit_indx++;
768 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
769 		sctp_audit_indx = 0;
770 	}
771 }
772 
773 #endif
774 
775 /*
776  * sctp_stop_timers_for_shutdown() should be called
777  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
778  * state to make sure that all timers are stopped.
779  */
780 void
781 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
782 {
783 	struct sctp_association *asoc;
784 	struct sctp_nets *net;
785 
786 	asoc = &stcb->asoc;
787 
788 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
789 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
790 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
791 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
792 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
793 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
794 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
795 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
796 	}
797 }
798 
799 /*
800  * A list of sizes based on typical mtu's, used only if next hop size not
801  * returned. These values MUST be multiples of 4 and MUST be ordered.
802  */
803 static uint32_t sctp_mtu_sizes[] = {
804 	68,
805 	296,
806 	508,
807 	512,
808 	544,
809 	576,
810 	1004,
811 	1492,
812 	1500,
813 	1536,
814 	2000,
815 	2048,
816 	4352,
817 	4464,
818 	8166,
819 	17912,
820 	32000,
821 	65532
822 };
823 
824 /*
825  * Return the largest MTU in sctp_mtu_sizes smaller than val.
826  * If val is smaller than the minimum, just return the largest
827  * multiple of 4 smaller or equal to val.
828  * Ensure that the result is a multiple of 4.
829  */
830 uint32_t
831 sctp_get_prev_mtu(uint32_t val)
832 {
833 	uint32_t i;
834 
835 	val &= 0xfffffffc;
836 	if (val <= sctp_mtu_sizes[0]) {
837 		return (val);
838 	}
839 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
840 		if (val <= sctp_mtu_sizes[i]) {
841 			break;
842 		}
843 	}
844 	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
845 	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
846 	return (sctp_mtu_sizes[i - 1]);
847 }
848 
849 /*
850  * Return the smallest MTU in sctp_mtu_sizes larger than val.
851  * If val is larger than the maximum, just return the largest multiple of 4 smaller
852  * or equal to val.
853  * Ensure that the result is a multiple of 4.
854  */
855 uint32_t
856 sctp_get_next_mtu(uint32_t val)
857 {
858 	/* select another MTU that is just bigger than this one */
859 	uint32_t i;
860 
861 	val &= 0xfffffffc;
862 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
863 		if (val < sctp_mtu_sizes[i]) {
864 			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
865 			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
866 			return (sctp_mtu_sizes[i]);
867 		}
868 	}
869 	return (val);
870 }
871 
/*
 * Refill the endpoint's random-number store by hashing the secret random
 * numbers with a monotonically increasing counter, then reset the read
 * position to the start of the store.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}
890 
891 uint32_t
892 sctp_select_initial_TSN(struct sctp_pcb *inp)
893 {
894 	/*
895 	 * A true implementation should use random selection process to get
896 	 * the initial stream sequence number, using RFC1750 as a good
897 	 * guideline
898 	 */
899 	uint32_t x, *xp;
900 	uint8_t *p;
901 	int store_at, new_store;
902 
903 	if (inp->initial_sequence_debug != 0) {
904 		uint32_t ret;
905 
906 		ret = inp->initial_sequence_debug;
907 		inp->initial_sequence_debug++;
908 		return (ret);
909 	}
910 retry:
911 	store_at = inp->store_at;
912 	new_store = store_at + sizeof(uint32_t);
913 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
914 		new_store = 0;
915 	}
916 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
917 		goto retry;
918 	}
919 	if (new_store == 0) {
920 		/* Refill the random store */
921 		sctp_fill_random_store(inp);
922 	}
923 	p = &inp->random_store[store_at];
924 	xp = (uint32_t *)p;
925 	x = *xp;
926 	return (x);
927 }
928 
929 uint32_t
930 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
931 {
932 	uint32_t x;
933 	struct timeval now;
934 
935 	if (check) {
936 		(void)SCTP_GETTIME_TIMEVAL(&now);
937 	}
938 	for (;;) {
939 		x = sctp_select_initial_TSN(&inp->sctp_ep);
940 		if (x == 0) {
941 			/* we never use 0 */
942 			continue;
943 		}
944 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
945 			break;
946 		}
947 	}
948 	return (x);
949 }
950 
951 int32_t
952 sctp_map_assoc_state(int kernel_state)
953 {
954 	int32_t user_state;
955 
956 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
957 		user_state = SCTP_CLOSED;
958 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
959 		user_state = SCTP_SHUTDOWN_PENDING;
960 	} else {
961 		switch (kernel_state & SCTP_STATE_MASK) {
962 		case SCTP_STATE_EMPTY:
963 			user_state = SCTP_CLOSED;
964 			break;
965 		case SCTP_STATE_INUSE:
966 			user_state = SCTP_CLOSED;
967 			break;
968 		case SCTP_STATE_COOKIE_WAIT:
969 			user_state = SCTP_COOKIE_WAIT;
970 			break;
971 		case SCTP_STATE_COOKIE_ECHOED:
972 			user_state = SCTP_COOKIE_ECHOED;
973 			break;
974 		case SCTP_STATE_OPEN:
975 			user_state = SCTP_ESTABLISHED;
976 			break;
977 		case SCTP_STATE_SHUTDOWN_SENT:
978 			user_state = SCTP_SHUTDOWN_SENT;
979 			break;
980 		case SCTP_STATE_SHUTDOWN_RECEIVED:
981 			user_state = SCTP_SHUTDOWN_RECEIVED;
982 			break;
983 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
984 			user_state = SCTP_SHUTDOWN_ACK_SENT;
985 			break;
986 		default:
987 			user_state = SCTP_CLOSED;
988 			break;
989 		}
990 	}
991 	return (user_state);
992 }
993 
994 int
995 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
996     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
997 {
998 	struct sctp_association *asoc;
999 
1000 	/*
1001 	 * Anything set to zero is taken care of by the allocation routine's
1002 	 * bzero
1003 	 */
1004 
1005 	/*
1006 	 * Up front select what scoping to apply on addresses I tell my peer
1007 	 * Not sure what to do with these right now, we will need to come up
1008 	 * with a way to set them. We may need to pass them through from the
1009 	 * caller in the sctp_aloc_assoc() function.
1010 	 */
1011 	int i;
1012 #if defined(SCTP_DETAILED_STR_STATS)
1013 	int j;
1014 #endif
1015 
1016 	asoc = &stcb->asoc;
1017 	/* init all variables to a known value. */
1018 	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
1019 	asoc->max_burst = inp->sctp_ep.max_burst;
1020 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
1021 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
1022 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
1023 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
1024 	asoc->ecn_supported = inp->ecn_supported;
1025 	asoc->prsctp_supported = inp->prsctp_supported;
1026 	asoc->idata_supported = inp->idata_supported;
1027 	asoc->auth_supported = inp->auth_supported;
1028 	asoc->asconf_supported = inp->asconf_supported;
1029 	asoc->reconfig_supported = inp->reconfig_supported;
1030 	asoc->nrsack_supported = inp->nrsack_supported;
1031 	asoc->pktdrop_supported = inp->pktdrop_supported;
1032 	asoc->idata_supported = inp->idata_supported;
1033 	asoc->sctp_cmt_pf = (uint8_t)0;
1034 	asoc->sctp_frag_point = inp->sctp_frag_point;
1035 	asoc->sctp_features = inp->sctp_features;
1036 	asoc->default_dscp = inp->sctp_ep.default_dscp;
1037 	asoc->max_cwnd = inp->max_cwnd;
1038 #ifdef INET6
1039 	if (inp->sctp_ep.default_flowlabel) {
1040 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
1041 	} else {
1042 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
1043 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
1044 			asoc->default_flowlabel &= 0x000fffff;
1045 			asoc->default_flowlabel |= 0x80000000;
1046 		} else {
1047 			asoc->default_flowlabel = 0;
1048 		}
1049 	}
1050 #endif
1051 	asoc->sb_send_resv = 0;
1052 	if (override_tag) {
1053 		asoc->my_vtag = override_tag;
1054 	} else {
1055 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1056 	}
1057 	/* Get the nonce tags */
1058 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1059 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1060 	asoc->vrf_id = vrf_id;
1061 
1062 #ifdef SCTP_ASOCLOG_OF_TSNS
1063 	asoc->tsn_in_at = 0;
1064 	asoc->tsn_out_at = 0;
1065 	asoc->tsn_in_wrapped = 0;
1066 	asoc->tsn_out_wrapped = 0;
1067 	asoc->cumack_log_at = 0;
1068 	asoc->cumack_log_atsnt = 0;
1069 #endif
1070 #ifdef SCTP_FS_SPEC_LOG
1071 	asoc->fs_index = 0;
1072 #endif
1073 	asoc->refcnt = 0;
1074 	asoc->assoc_up_sent = 0;
1075 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1076 	    sctp_select_initial_TSN(&inp->sctp_ep);
1077 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1078 	/* we are optimisitic here */
1079 	asoc->peer_supports_nat = 0;
1080 	asoc->sent_queue_retran_cnt = 0;
1081 
1082 	/* for CMT */
1083 	asoc->last_net_cmt_send_started = NULL;
1084 
1085 	/* This will need to be adjusted */
1086 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1087 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1088 	asoc->asconf_seq_in = asoc->last_acked_seq;
1089 
1090 	/* here we are different, we hold the next one we expect */
1091 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1092 
1093 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1094 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1095 
1096 	asoc->default_mtu = inp->sctp_ep.default_mtu;
1097 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1098 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1099 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1100 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1101 	asoc->free_chunk_cnt = 0;
1102 
1103 	asoc->iam_blocking = 0;
1104 	asoc->context = inp->sctp_context;
1105 	asoc->local_strreset_support = inp->local_strreset_support;
1106 	asoc->def_send = inp->def_send;
1107 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1108 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1109 	asoc->pr_sctp_cnt = 0;
1110 	asoc->total_output_queue_size = 0;
1111 
1112 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1113 		asoc->scope.ipv6_addr_legal = 1;
1114 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1115 			asoc->scope.ipv4_addr_legal = 1;
1116 		} else {
1117 			asoc->scope.ipv4_addr_legal = 0;
1118 		}
1119 	} else {
1120 		asoc->scope.ipv6_addr_legal = 0;
1121 		asoc->scope.ipv4_addr_legal = 1;
1122 	}
1123 
1124 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1125 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1126 
1127 	asoc->smallest_mtu = inp->sctp_frag_point;
1128 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1129 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1130 
1131 	asoc->stream_locked_on = 0;
1132 	asoc->ecn_echo_cnt_onq = 0;
1133 	asoc->stream_locked = 0;
1134 
1135 	asoc->send_sack = 1;
1136 
1137 	LIST_INIT(&asoc->sctp_restricted_addrs);
1138 
1139 	TAILQ_INIT(&asoc->nets);
1140 	TAILQ_INIT(&asoc->pending_reply_queue);
1141 	TAILQ_INIT(&asoc->asconf_ack_sent);
1142 	/* Setup to fill the hb random cache at first HB */
1143 	asoc->hb_random_idx = 4;
1144 
1145 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1146 
1147 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1148 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1149 
1150 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1151 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1152 
1153 	/*
1154 	 * Now the stream parameters, here we allocate space for all streams
1155 	 * that we request by default.
1156 	 */
1157 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1158 	    o_strms;
1159 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1160 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1161 	    SCTP_M_STRMO);
1162 	if (asoc->strmout == NULL) {
1163 		/* big trouble no memory */
1164 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1165 		return (ENOMEM);
1166 	}
1167 	for (i = 0; i < asoc->streamoutcnt; i++) {
1168 		/*
1169 		 * inbound side must be set to 0xffff, also NOTE when we get
1170 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1171 		 * count (streamoutcnt) but first check if we sent to any of
1172 		 * the upper streams that were dropped (if some were). Those
1173 		 * that were dropped must be notified to the upper layer as
1174 		 * failed to send.
1175 		 */
1176 		asoc->strmout[i].next_mid_ordered = 0;
1177 		asoc->strmout[i].next_mid_unordered = 0;
1178 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1179 		asoc->strmout[i].chunks_on_queues = 0;
1180 #if defined(SCTP_DETAILED_STR_STATS)
1181 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1182 			asoc->strmout[i].abandoned_sent[j] = 0;
1183 			asoc->strmout[i].abandoned_unsent[j] = 0;
1184 		}
1185 #else
1186 		asoc->strmout[i].abandoned_sent[0] = 0;
1187 		asoc->strmout[i].abandoned_unsent[0] = 0;
1188 #endif
1189 		asoc->strmout[i].sid = i;
1190 		asoc->strmout[i].last_msg_incomplete = 0;
1191 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1192 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1193 	}
1194 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1195 
1196 	/* Now the mapping array */
1197 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1198 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1199 	    SCTP_M_MAP);
1200 	if (asoc->mapping_array == NULL) {
1201 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1202 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1203 		return (ENOMEM);
1204 	}
1205 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1206 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1207 	    SCTP_M_MAP);
1208 	if (asoc->nr_mapping_array == NULL) {
1209 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1210 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1211 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1212 		return (ENOMEM);
1213 	}
1214 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1215 
1216 	/* Now the init of the other outqueues */
1217 	TAILQ_INIT(&asoc->free_chunks);
1218 	TAILQ_INIT(&asoc->control_send_queue);
1219 	TAILQ_INIT(&asoc->asconf_send_queue);
1220 	TAILQ_INIT(&asoc->send_queue);
1221 	TAILQ_INIT(&asoc->sent_queue);
1222 	TAILQ_INIT(&asoc->resetHead);
1223 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1224 	TAILQ_INIT(&asoc->asconf_queue);
1225 	/* authentication fields */
1226 	asoc->authinfo.random = NULL;
1227 	asoc->authinfo.active_keyid = 0;
1228 	asoc->authinfo.assoc_key = NULL;
1229 	asoc->authinfo.assoc_keyid = 0;
1230 	asoc->authinfo.recv_key = NULL;
1231 	asoc->authinfo.recv_keyid = 0;
1232 	LIST_INIT(&asoc->shared_keys);
1233 	asoc->marked_retrans = 0;
1234 	asoc->port = inp->sctp_ep.port;
1235 	asoc->timoinit = 0;
1236 	asoc->timodata = 0;
1237 	asoc->timosack = 0;
1238 	asoc->timoshutdown = 0;
1239 	asoc->timoheartbeat = 0;
1240 	asoc->timocookie = 0;
1241 	asoc->timoshutdownack = 0;
1242 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1243 	asoc->discontinuity_time = asoc->start_time;
1244 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1245 		asoc->abandoned_unsent[i] = 0;
1246 		asoc->abandoned_sent[i] = 0;
1247 	}
1248 	/*
1249 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1250 	 * freed later when the association is freed.
1251 	 */
1252 	return (0);
1253 }
1254 
1255 void
1256 sctp_print_mapping_array(struct sctp_association *asoc)
1257 {
1258 	unsigned int i, limit;
1259 
1260 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1261 	    asoc->mapping_array_size,
1262 	    asoc->mapping_array_base_tsn,
1263 	    asoc->cumulative_tsn,
1264 	    asoc->highest_tsn_inside_map,
1265 	    asoc->highest_tsn_inside_nr_map);
1266 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1267 		if (asoc->mapping_array[limit - 1] != 0) {
1268 			break;
1269 		}
1270 	}
1271 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1272 	for (i = 0; i < limit; i++) {
1273 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1274 	}
1275 	if (limit % 16)
1276 		SCTP_PRINTF("\n");
1277 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1278 		if (asoc->nr_mapping_array[limit - 1]) {
1279 			break;
1280 		}
1281 	}
1282 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1283 	for (i = 0; i < limit; i++) {
1284 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1285 	}
1286 	if (limit % 16)
1287 		SCTP_PRINTF("\n");
1288 }
1289 
1290 int
1291 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1292 {
1293 	/* mapping array needs to grow */
1294 	uint8_t *new_array1, *new_array2;
1295 	uint32_t new_size;
1296 
1297 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1298 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1299 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1300 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1301 		/* can't get more, forget it */
1302 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1303 		if (new_array1) {
1304 			SCTP_FREE(new_array1, SCTP_M_MAP);
1305 		}
1306 		if (new_array2) {
1307 			SCTP_FREE(new_array2, SCTP_M_MAP);
1308 		}
1309 		return (-1);
1310 	}
1311 	memset(new_array1, 0, new_size);
1312 	memset(new_array2, 0, new_size);
1313 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1314 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1315 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1316 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1317 	asoc->mapping_array = new_array1;
1318 	asoc->nr_mapping_array = new_array2;
1319 	asoc->mapping_array_size = new_size;
1320 	return (0);
1321 }
1322 
1323 
/*
 * Run a single iterator "it" to completion: walk the endpoint list (or
 * just the single endpoint the iterator targets), invoking the caller
 * supplied per-endpoint and per-association callbacks on everything
 * that matches the iterator's pcb_flags/pcb_features/asoc_state
 * filters.  Periodically drops and re-takes the INP-INFO and iterator
 * locks so other threads can make progress; honors stop requests set
 * in sctp_it_ctl.iterator_flags while the locks are dropped.  Frees
 * "it" when the iteration finishes.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	/* Skip endpoints whose flags/features don't match the filters. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/* While the locks were dropped, someone may have
			 * asked us to stop; check the control flags. */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1474 
1475 void
1476 sctp_iterator_worker(void)
1477 {
1478 	struct sctp_iterator *it;
1479 
1480 	/* This function is called with the WQ lock in place */
1481 	sctp_it_ctl.iterator_running = 1;
1482 	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
1483 		/* now lets work on this one */
1484 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1485 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1486 		CURVNET_SET(it->vn);
1487 		sctp_iterator_work(it);
1488 		CURVNET_RESTORE();
1489 		SCTP_IPI_ITERATOR_WQ_LOCK();
1490 		/* sa_ignore FREED_MEMORY */
1491 	}
1492 	sctp_it_ctl.iterator_running = 0;
1493 	return;
1494 }
1495 
1496 
1497 static void
1498 sctp_handle_addr_wq(void)
1499 {
1500 	/* deal with the ADDR wq from the rtsock calls */
1501 	struct sctp_laddr *wi, *nwi;
1502 	struct sctp_asconf_iterator *asc;
1503 
1504 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1505 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1506 	if (asc == NULL) {
1507 		/* Try later, no memory */
1508 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1509 		    (struct sctp_inpcb *)NULL,
1510 		    (struct sctp_tcb *)NULL,
1511 		    (struct sctp_nets *)NULL);
1512 		return;
1513 	}
1514 	LIST_INIT(&asc->list_of_work);
1515 	asc->cnt = 0;
1516 
1517 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1518 		LIST_REMOVE(wi, sctp_nxt_addr);
1519 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1520 		asc->cnt++;
1521 	}
1522 
1523 	if (asc->cnt == 0) {
1524 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1525 	} else {
1526 		int ret;
1527 
1528 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1529 		    sctp_asconf_iterator_stcb,
1530 		    NULL,	/* No ep end for boundall */
1531 		    SCTP_PCB_FLAGS_BOUNDALL,
1532 		    SCTP_PCB_ANY_FEATURES,
1533 		    SCTP_ASOC_ANY_STATE,
1534 		    (void *)asc, 0,
1535 		    sctp_asconf_iterator_end, NULL, 0);
1536 		if (ret) {
1537 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1538 			/*
1539 			 * Freeing if we are stopping or put back on the
1540 			 * addr_wq.
1541 			 */
1542 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1543 				sctp_asconf_iterator_end(asc, 0);
1544 			} else {
1545 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1546 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1547 				}
1548 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1549 			}
1550 		}
1551 	}
1552 }
1553 
/*
 * Common callout handler for every SCTP timer type.  "t" is the
 * struct sctp_timer embedded in the endpoint, association or net.
 * After validating the timer (stale pointer, invalid type, rescheduled
 * or deactivated callout) and taking the appropriate lock — TCB lock
 * when an association is involved, INP write lock when only an
 * endpoint is, or the address work-queue lock otherwise — it
 * dispatches on tmr->type to the per-timer logic and usually triggers
 * chunk output afterwards.  The tmr->stopped_from breadcrumbs record
 * how far the handler got, for debugging.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif
	int did_output;
	int type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Hold a reference while we validate the association. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	type = tmr->type;
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	/* Take the lock appropriate to the timer's scope. */
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	} else if (inp != NULL) {
		if (type != SCTP_TIMER_TYPE_INPKILL) {
			SCTP_INP_WLOCK(inp);
		}
	} else {
		SCTP_WQ_ADDR_LOCK();
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}

		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			/* Rotate the endpoint's cookie secret. */
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Shutdown guard timer expired");
		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t)type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* Release whichever lock was taken above. */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	} else if (inp != NULL) {
		SCTP_INP_WUNLOCK(inp);
	} else {
		SCTP_WQ_ADDR_UNLOCK();
	}

out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}

out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
	CURVNET_RESTORE();
}
1991 
1992 void
1993 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1994     struct sctp_nets *net)
1995 {
1996 	uint32_t to_ticks;
1997 	struct sctp_timer *tmr;
1998 
1999 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
2000 		return;
2001 
2002 	tmr = NULL;
2003 	if (stcb) {
2004 		SCTP_TCB_LOCK_ASSERT(stcb);
2005 	}
2006 	switch (t_type) {
2007 	case SCTP_TIMER_TYPE_ADDR_WQ:
2008 		/* Only 1 tick away :-) */
2009 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2010 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
2011 		break;
2012 	case SCTP_TIMER_TYPE_SEND:
2013 		/* Here we use the RTO timer */
2014 		{
2015 			int rto_val;
2016 
2017 			if ((stcb == NULL) || (net == NULL)) {
2018 				return;
2019 			}
2020 			tmr = &net->rxt_timer;
2021 			if (net->RTO == 0) {
2022 				rto_val = stcb->asoc.initial_rto;
2023 			} else {
2024 				rto_val = net->RTO;
2025 			}
2026 			to_ticks = MSEC_TO_TICKS(rto_val);
2027 		}
2028 		break;
2029 	case SCTP_TIMER_TYPE_INIT:
2030 		/*
2031 		 * Here we use the INIT timer default usually about 1
2032 		 * minute.
2033 		 */
2034 		if ((stcb == NULL) || (net == NULL)) {
2035 			return;
2036 		}
2037 		tmr = &net->rxt_timer;
2038 		if (net->RTO == 0) {
2039 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2040 		} else {
2041 			to_ticks = MSEC_TO_TICKS(net->RTO);
2042 		}
2043 		break;
2044 	case SCTP_TIMER_TYPE_RECV:
2045 		/*
2046 		 * Here we use the Delayed-Ack timer value from the inp
2047 		 * ususually about 200ms.
2048 		 */
2049 		if (stcb == NULL) {
2050 			return;
2051 		}
2052 		tmr = &stcb->asoc.dack_timer;
2053 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2054 		break;
2055 	case SCTP_TIMER_TYPE_SHUTDOWN:
2056 		/* Here we use the RTO of the destination. */
2057 		if ((stcb == NULL) || (net == NULL)) {
2058 			return;
2059 		}
2060 		if (net->RTO == 0) {
2061 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2062 		} else {
2063 			to_ticks = MSEC_TO_TICKS(net->RTO);
2064 		}
2065 		tmr = &net->rxt_timer;
2066 		break;
2067 	case SCTP_TIMER_TYPE_HEARTBEAT:
2068 		/*
2069 		 * the net is used here so that we can add in the RTO. Even
2070 		 * though we use a different timer. We also add the HB timer
2071 		 * PLUS a random jitter.
2072 		 */
2073 		if ((stcb == NULL) || (net == NULL)) {
2074 			return;
2075 		} else {
2076 			uint32_t rndval;
2077 			uint32_t jitter;
2078 
2079 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2080 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2081 				return;
2082 			}
2083 			if (net->RTO == 0) {
2084 				to_ticks = stcb->asoc.initial_rto;
2085 			} else {
2086 				to_ticks = net->RTO;
2087 			}
2088 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2089 			jitter = rndval % to_ticks;
2090 			if (jitter >= (to_ticks >> 1)) {
2091 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2092 			} else {
2093 				to_ticks = to_ticks - jitter;
2094 			}
2095 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2096 			    !(net->dest_state & SCTP_ADDR_PF)) {
2097 				to_ticks += net->heart_beat_delay;
2098 			}
2099 			/*
2100 			 * Now we must convert the to_ticks that are now in
2101 			 * ms to ticks.
2102 			 */
2103 			to_ticks = MSEC_TO_TICKS(to_ticks);
2104 			tmr = &net->hb_timer;
2105 		}
2106 		break;
2107 	case SCTP_TIMER_TYPE_COOKIE:
2108 		/*
2109 		 * Here we can use the RTO timer from the network since one
2110 		 * RTT was compelete. If a retran happened then we will be
2111 		 * using the RTO initial value.
2112 		 */
2113 		if ((stcb == NULL) || (net == NULL)) {
2114 			return;
2115 		}
2116 		if (net->RTO == 0) {
2117 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2118 		} else {
2119 			to_ticks = MSEC_TO_TICKS(net->RTO);
2120 		}
2121 		tmr = &net->rxt_timer;
2122 		break;
2123 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2124 		/*
2125 		 * nothing needed but the endpoint here ususually about 60
2126 		 * minutes.
2127 		 */
2128 		tmr = &inp->sctp_ep.signature_change;
2129 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2130 		break;
2131 	case SCTP_TIMER_TYPE_ASOCKILL:
2132 		if (stcb == NULL) {
2133 			return;
2134 		}
2135 		tmr = &stcb->asoc.strreset_timer;
2136 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2137 		break;
2138 	case SCTP_TIMER_TYPE_INPKILL:
2139 		/*
2140 		 * The inp is setup to die. We re-use the signature_chage
2141 		 * timer since that has stopped and we are in the GONE
2142 		 * state.
2143 		 */
2144 		tmr = &inp->sctp_ep.signature_change;
2145 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2146 		break;
2147 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2148 		/*
2149 		 * Here we use the value found in the EP for PMTU ususually
2150 		 * about 10 minutes.
2151 		 */
2152 		if ((stcb == NULL) || (net == NULL)) {
2153 			return;
2154 		}
2155 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2156 			return;
2157 		}
2158 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2159 		tmr = &net->pmtu_timer;
2160 		break;
2161 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2162 		/* Here we use the RTO of the destination */
2163 		if ((stcb == NULL) || (net == NULL)) {
2164 			return;
2165 		}
2166 		if (net->RTO == 0) {
2167 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2168 		} else {
2169 			to_ticks = MSEC_TO_TICKS(net->RTO);
2170 		}
2171 		tmr = &net->rxt_timer;
2172 		break;
2173 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2174 		/*
2175 		 * Here we use the endpoints shutdown guard timer usually
2176 		 * about 3 minutes.
2177 		 */
2178 		if (stcb == NULL) {
2179 			return;
2180 		}
2181 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2182 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2183 		} else {
2184 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2185 		}
2186 		tmr = &stcb->asoc.shut_guard_timer;
2187 		break;
2188 	case SCTP_TIMER_TYPE_STRRESET:
2189 		/*
2190 		 * Here the timer comes from the stcb but its value is from
2191 		 * the net's RTO.
2192 		 */
2193 		if ((stcb == NULL) || (net == NULL)) {
2194 			return;
2195 		}
2196 		if (net->RTO == 0) {
2197 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2198 		} else {
2199 			to_ticks = MSEC_TO_TICKS(net->RTO);
2200 		}
2201 		tmr = &stcb->asoc.strreset_timer;
2202 		break;
2203 	case SCTP_TIMER_TYPE_ASCONF:
2204 		/*
2205 		 * Here the timer comes from the stcb but its value is from
2206 		 * the net's RTO.
2207 		 */
2208 		if ((stcb == NULL) || (net == NULL)) {
2209 			return;
2210 		}
2211 		if (net->RTO == 0) {
2212 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2213 		} else {
2214 			to_ticks = MSEC_TO_TICKS(net->RTO);
2215 		}
2216 		tmr = &stcb->asoc.asconf_timer;
2217 		break;
2218 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2219 		if ((stcb == NULL) || (net != NULL)) {
2220 			return;
2221 		}
2222 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2223 		tmr = &stcb->asoc.delete_prim_timer;
2224 		break;
2225 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2226 		if (stcb == NULL) {
2227 			return;
2228 		}
2229 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2230 			/*
2231 			 * Really an error since stcb is NOT set to
2232 			 * autoclose
2233 			 */
2234 			return;
2235 		}
2236 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2237 		tmr = &stcb->asoc.autoclose_timer;
2238 		break;
2239 	default:
2240 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2241 		    __func__, t_type);
2242 		return;
2243 		break;
2244 	}
2245 	if ((to_ticks <= 0) || (tmr == NULL)) {
2246 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2247 		    __func__, t_type, to_ticks, (void *)tmr);
2248 		return;
2249 	}
2250 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2251 		/*
2252 		 * we do NOT allow you to have it already running. if it is
2253 		 * we leave the current one up unchanged
2254 		 */
2255 		return;
2256 	}
2257 	/* At this point we can proceed */
2258 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2259 		stcb->asoc.num_send_timers_up++;
2260 	}
2261 	tmr->stopped_from = 0;
2262 	tmr->type = t_type;
2263 	tmr->ep = (void *)inp;
2264 	tmr->tcb = (void *)stcb;
2265 	tmr->net = (void *)net;
2266 	tmr->self = (void *)tmr;
2267 	tmr->vnet = (void *)curvnet;
2268 	tmr->ticks = sctp_get_tick_count();
2269 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2270 	return;
2271 }
2272 
/*
 * Stop the timer of type 't_type' belonging to the given endpoint (inp),
 * association (stcb) and destination (net), if it is the timer currently
 * occupying its backing sctp_timer slot.  'from' identifies the call site
 * and is recorded in tmr->stopped_from for debugging.  Several timer
 * types share a slot (e.g. SEND/INIT/COOKIE all use net->rxt_timer); if
 * the slot currently holds a different type, the request is a no-op.
 * Callers must hold the TCB lock when stcb != NULL.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address workqueue timer may be stopped without an inp. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Map the timer type to the sctp_timer slot that backs it. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		/* ASOCKILL shares the stream-reset timer slot. */
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __func__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* Keep the pending-SEND-timer count consistent; clamp at 0. */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	/* Invalidate the slot so a late-firing handler can detect the stop. */
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2423 
2424 uint32_t
2425 sctp_calculate_len(struct mbuf *m)
2426 {
2427 	uint32_t tlen = 0;
2428 	struct mbuf *at;
2429 
2430 	at = m;
2431 	while (at) {
2432 		tlen += SCTP_BUF_LEN(at);
2433 		at = SCTP_BUF_NEXT(at);
2434 	}
2435 	return (tlen);
2436 }
2437 
2438 void
2439 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2440     struct sctp_association *asoc, uint32_t mtu)
2441 {
2442 	/*
2443 	 * Reset the P-MTU size on this association, this involves changing
2444 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2445 	 * allow the DF flag to be cleared.
2446 	 */
2447 	struct sctp_tmit_chunk *chk;
2448 	unsigned int eff_mtu, ovh;
2449 
2450 	asoc->smallest_mtu = mtu;
2451 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2452 		ovh = SCTP_MIN_OVERHEAD;
2453 	} else {
2454 		ovh = SCTP_MIN_V4_OVERHEAD;
2455 	}
2456 	eff_mtu = mtu - ovh;
2457 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2458 		if (chk->send_size > eff_mtu) {
2459 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2460 		}
2461 	}
2462 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2463 		if (chk->send_size > eff_mtu) {
2464 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2465 		}
2466 	}
2467 }
2468 
2469 
2470 /*
2471  * Given an association and starting time of the current RTT period, update
2472  * RTO in number of msecs. net should point to the current network.
2473  * Return 1, if an RTO update was performed, return 0 if no update was
2474  * performed due to invalid starting point.
2475  */
2476 
2477 int
2478 sctp_calculate_rto(struct sctp_tcb *stcb,
2479     struct sctp_association *asoc,
2480     struct sctp_nets *net,
2481     struct timeval *old,
2482     int rtt_from_sack)
2483 {
2484 	struct timeval now;
2485 	uint64_t rtt_us;	/* RTT in us */
2486 	int32_t rtt;		/* RTT in ms */
2487 	uint32_t new_rto;
2488 	int first_measure = 0;
2489 
2490 	/************************/
2491 	/* 1. calculate new RTT */
2492 	/************************/
2493 	/* get the current time */
2494 	if (stcb->asoc.use_precise_time) {
2495 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2496 	} else {
2497 		(void)SCTP_GETTIME_TIMEVAL(&now);
2498 	}
2499 	if ((old->tv_sec > now.tv_sec) ||
2500 	    ((old->tv_sec == now.tv_sec) && (old->tv_sec > now.tv_sec))) {
2501 		/* The starting point is in the future. */
2502 		return (0);
2503 	}
2504 	timevalsub(&now, old);
2505 	rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec;
2506 	if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) {
2507 		/* The RTT is larger than a sane value. */
2508 		return (0);
2509 	}
2510 	/* store the current RTT in us */
2511 	net->rtt = rtt_us;
2512 	/* compute rtt in ms */
2513 	rtt = (int32_t)(net->rtt / 1000);
2514 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2515 		/*
2516 		 * Tell the CC module that a new update has just occurred
2517 		 * from a sack
2518 		 */
2519 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2520 	}
2521 	/*
2522 	 * Do we need to determine the lan? We do this only on sacks i.e.
2523 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2524 	 */
2525 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2526 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2527 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2528 			net->lan_type = SCTP_LAN_INTERNET;
2529 		} else {
2530 			net->lan_type = SCTP_LAN_LOCAL;
2531 		}
2532 	}
2533 
2534 	/***************************/
2535 	/* 2. update RTTVAR & SRTT */
2536 	/***************************/
2537 	/*-
2538 	 * Compute the scaled average lastsa and the
2539 	 * scaled variance lastsv as described in van Jacobson
2540 	 * Paper "Congestion Avoidance and Control", Annex A.
2541 	 *
2542 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2543 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2544 	 */
2545 	if (net->RTO_measured) {
2546 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2547 		net->lastsa += rtt;
2548 		if (rtt < 0) {
2549 			rtt = -rtt;
2550 		}
2551 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2552 		net->lastsv += rtt;
2553 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2554 			rto_logging(net, SCTP_LOG_RTTVAR);
2555 		}
2556 	} else {
2557 		/* First RTO measurment */
2558 		net->RTO_measured = 1;
2559 		first_measure = 1;
2560 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2561 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2562 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2563 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2564 		}
2565 	}
2566 	if (net->lastsv == 0) {
2567 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2568 	}
2569 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2570 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2571 	    (stcb->asoc.sat_network_lockout == 0)) {
2572 		stcb->asoc.sat_network = 1;
2573 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2574 		stcb->asoc.sat_network = 0;
2575 		stcb->asoc.sat_network_lockout = 1;
2576 	}
2577 	/* bound it, per C6/C7 in Section 5.3.1 */
2578 	if (new_rto < stcb->asoc.minrto) {
2579 		new_rto = stcb->asoc.minrto;
2580 	}
2581 	if (new_rto > stcb->asoc.maxrto) {
2582 		new_rto = stcb->asoc.maxrto;
2583 	}
2584 	net->RTO = new_rto;
2585 	return (1);
2586 }
2587 
2588 /*
2589  * return a pointer to a contiguous piece of data from the given mbuf chain
2590  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2591  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2592  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2593  */
2594 caddr_t
2595 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2596 {
2597 	uint32_t count;
2598 	uint8_t *ptr;
2599 
2600 	ptr = in_ptr;
2601 	if ((off < 0) || (len <= 0))
2602 		return (NULL);
2603 
2604 	/* find the desired start location */
2605 	while ((m != NULL) && (off > 0)) {
2606 		if (off < SCTP_BUF_LEN(m))
2607 			break;
2608 		off -= SCTP_BUF_LEN(m);
2609 		m = SCTP_BUF_NEXT(m);
2610 	}
2611 	if (m == NULL)
2612 		return (NULL);
2613 
2614 	/* is the current mbuf large enough (eg. contiguous)? */
2615 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2616 		return (mtod(m, caddr_t)+off);
2617 	} else {
2618 		/* else, it spans more than one mbuf, so save a temp copy... */
2619 		while ((m != NULL) && (len > 0)) {
2620 			count = min(SCTP_BUF_LEN(m) - off, len);
2621 			memcpy(ptr, mtod(m, caddr_t)+off, count);
2622 			len -= count;
2623 			ptr += count;
2624 			off = 0;
2625 			m = SCTP_BUF_NEXT(m);
2626 		}
2627 		if ((m == NULL) && (len > 0))
2628 			return (NULL);
2629 		else
2630 			return ((caddr_t)in_ptr);
2631 	}
2632 }
2633 
2634 
2635 
2636 struct sctp_paramhdr *
2637 sctp_get_next_param(struct mbuf *m,
2638     int offset,
2639     struct sctp_paramhdr *pull,
2640     int pull_limit)
2641 {
2642 	/* This just provides a typed signature to Peter's Pull routine */
2643 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2644 	    (uint8_t *)pull));
2645 }
2646 
2647 
2648 struct mbuf *
2649 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2650 {
2651 	struct mbuf *m_last;
2652 	caddr_t dp;
2653 
2654 	if (padlen > 3) {
2655 		return (NULL);
2656 	}
2657 	if (padlen <= M_TRAILINGSPACE(m)) {
2658 		/*
2659 		 * The easy way. We hope the majority of the time we hit
2660 		 * here :)
2661 		 */
2662 		m_last = m;
2663 	} else {
2664 		/* Hard way we must grow the mbuf chain */
2665 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2666 		if (m_last == NULL) {
2667 			return (NULL);
2668 		}
2669 		SCTP_BUF_LEN(m_last) = 0;
2670 		SCTP_BUF_NEXT(m_last) = NULL;
2671 		SCTP_BUF_NEXT(m) = m_last;
2672 	}
2673 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2674 	SCTP_BUF_LEN(m_last) += padlen;
2675 	memset(dp, 0, padlen);
2676 	return (m_last);
2677 }
2678 
2679 struct mbuf *
2680 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2681 {
2682 	/* find the last mbuf in chain and pad it */
2683 	struct mbuf *m_at;
2684 
2685 	if (last_mbuf != NULL) {
2686 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2687 	} else {
2688 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2689 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2690 				return (sctp_add_pad_tombuf(m_at, padval));
2691 			}
2692 		}
2693 	}
2694 	return (NULL);
2695 }
2696 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification for the given association
 * state transition ('state': SCTP_COMM_UP/SCTP_COMM_LOST/SCTP_RESTART/
 * SCTP_CANT_STR_ASSOC/...), queueing it on the socket receive buffer if
 * the application enabled SCTP_PCB_FLAGS_RECVASSOCEVNT.  For COMM_LOST /
 * CANT_STR_ASSOC the (truncated) ABORT chunk, if any, is appended to the
 * notification.  For 1-to-1 style sockets an error is also set on the
 * socket, and any sleepers are woken.  'from_peer' selects the error
 * code family (peer ABORT vs. local failure); 'so_locked' tells the
 * Apple/lock-testing builds whether the socket lock is already held.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t abort_len;
	unsigned int i;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
			/*
			 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
			 * contiguous.
			 */
			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
				abort_len = SCTP_CHUNK_BUFFER_SIZE;
			}
		} else {
			abort_len = 0;
		}
		/* Reserve room for the optional trailing info bytes. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				/* Can't allocate: still set the socket error below. */
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		/* Trailing info fits only if the large allocation succeeded. */
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				/* One feature byte per supported extension. */
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				if (stcb->asoc.idata_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				/* Append the (truncated) ABORT chunk for the app. */
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			control->spec_flags = M_NOTIFICATION;
			/* not that we need this */
			control->tail_mbuf = m_notify;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			/* No readq entry: drop the notification mbuf. */
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up and error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			/* Peer aborted: refused during setup, reset afterwards. */
			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			/* Local failure: timeout during setup, aborted afterwards. */
			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
		SOCK_UNLOCK(stcb->sctp_socket);
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* Hold a ref across the TCB unlock/relock for the socket lock. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* Socket went away while we dropped the TCB lock. */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2857 
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification for destination address
 * 'sa' entering state 'state' (e.g. reachable/unreachable/confirmed)
 * with the given error code.  Queued on the socket receive buffer only
 * if the application enabled SCTP_PCB_FLAGS_RECVPADDREVNT.  IPv4
 * addresses are reported v4-mapped when the app asked for mapped
 * addresses; link-local IPv6 scope ids are normalized for user view.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address into the event, in the form the app expects. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			/* App wants v4-mapped IPv6 addresses. */
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2949 
2950 
/*
 * Queue an SCTP_SEND_FAILED (old API) or SCTP_SEND_FAILED_EVENT (new API)
 * notification for a chunk whose delivery failed.  'sent' selects the
 * SCTP_DATA_SENT vs. SCTP_DATA_UNSENT flag; 'error' is passed through to
 * the application.  The chunk's data mbuf chain is stolen (chk->data is
 * NULLed) and appended after the notification header, with the chunk
 * header and any padding trimmed off when they can be identified.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	struct sctp_chunkhdr *chkhdr;
	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}

	/* The new-API event and the old struct have different sizes. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	/* I-DATA chunks carry a larger header than plain DATA chunks. */
	if (stcb->asoc.idata_supported) {
		chkhdr_len = sizeof(struct sctp_idata_chunk);
	} else {
		chkhdr_len = sizeof(struct sctp_data_chunk);
	}
	/* Use some defaults in case we can't access the chunk header */
	if (chk->send_size >= chkhdr_len) {
		payload_len = chk->send_size - chkhdr_len;
	} else {
		payload_len = 0;
	}
	padding_len = 0;
	if (chk->data != NULL) {
		/*
		 * Refine payload_len/padding_len from the on-wire chunk
		 * length when the chunk header is actually present and
		 * consistent with send_size (padding is at most 3 bytes).
		 */
		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
		if (chkhdr != NULL) {
			chk_len = ntohs(chkhdr->chunk_length);
			if ((chk_len >= chkhdr_len) &&
			    (chk->send_size >= chk_len) &&
			    (chk->send_size - chk_len < 4)) {
				padding_len = chk->send_size - chk_len;
				payload_len = chk->send_size - chkhdr_len - padding_len;
			}
		}
	}
	/* Fill in whichever notification format the application enabled. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	if (chk->data != NULL) {
		/* Trim off the sctp chunk header (it should be there) */
		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
			m_adj(chk->data, chkhdr_len);
			m_adj(chk->data, -padding_len);
			sctp_mbuf_crush(chk->data);
			chk->send_size -= (chkhdr_len + padding_len);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3085 
3086 
/*
 * Like sctp_notify_send_failed(), but for a stream-queue-pending entry
 * that never became a chunk (hence always SCTP_DATA_UNSENT).  The pending
 * data mbuf chain is stolen (sp->data is NULLed) and appended after the
 * notification header.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int notifhdr_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* Header size depends on which notification format is enabled. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* New-style SCTP_SEND_FAILED_EVENT. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->sid;
		/*
		 * If part of the message was already taken into chunks, the
		 * remainder reported here is the last fragment.
		 */
		if (sp->some_taken) {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		/* Old-style SCTP_SEND_FAILED. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->sid;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3186 
3187 
3188 
3189 static void
3190 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3191 {
3192 	struct mbuf *m_notify;
3193 	struct sctp_adaptation_event *sai;
3194 	struct sctp_queued_to_read *control;
3195 
3196 	if ((stcb == NULL) ||
3197 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3198 		/* event not enabled */
3199 		return;
3200 	}
3201 
3202 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3203 	if (m_notify == NULL)
3204 		/* no space left */
3205 		return;
3206 	SCTP_BUF_LEN(m_notify) = 0;
3207 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3208 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3209 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3210 	sai->sai_flags = 0;
3211 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3212 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3213 	sai->sai_assoc_id = sctp_get_associd(stcb);
3214 
3215 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3216 	SCTP_BUF_NEXT(m_notify) = NULL;
3217 
3218 	/* append to socket */
3219 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3220 	    0, 0, stcb->asoc.context, 0, 0, 0,
3221 	    m_notify);
3222 	if (control == NULL) {
3223 		/* no memory */
3224 		sctp_m_freem(m_notify);
3225 		return;
3226 	}
3227 	control->length = SCTP_BUF_LEN(m_notify);
3228 	control->spec_flags = M_NOTIFICATION;
3229 	/* not that we need this */
3230 	control->tail_mbuf = m_notify;
3231 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3232 	    control,
3233 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3234 }
3235 
3236 /* This always must be called with the read-queue LOCKED in the INP */
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT notification.  Unlike the other
 * notification helpers this one does NOT use sctp_add_to_readq(): the
 * caller already holds the INP read-queue lock (see comment above), so
 * the entry is inserted into the read queue directly, right after the
 * partially-delivered message it refers to when possible.  'val' packs
 * the stream number in the upper 16 bits and the sequence number in the
 * lower 16 bits.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* nobody can read the notification anyway */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* val encodes stream (high 16 bits) and sequence (low 16 bits) */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sb = &stcb->sctp_socket->so_rcv;
	/* Account for the notification in the receive socket buffer. */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	control->end_added = 1;
	/* Insert right after the message being partially delivered. */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * On platforms with a per-socket lock, drop the TCB lock
		 * and take the socket lock first (lock order), holding a
		 * refcount so the assoc cannot disappear meanwhile.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3329 
/*
 * Handle the user-visible side of a completed SHUTDOWN: for TCP-model
 * (one-to-one or peeled-off) sockets, mark the socket as unable to send
 * and wake up writers; then, if subscribed, queue an SCTP_SHUTDOWN_EVENT
 * notification on the read queue.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Per-socket-lock platforms: release the TCB lock, take
		 * the socket lock, then re-take the TCB lock (holding a
		 * refcount across the gap).
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3399 
3400 static void
3401 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3402     int so_locked
3403 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3404     SCTP_UNUSED
3405 #endif
3406 )
3407 {
3408 	struct mbuf *m_notify;
3409 	struct sctp_sender_dry_event *event;
3410 	struct sctp_queued_to_read *control;
3411 
3412 	if ((stcb == NULL) ||
3413 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3414 		/* event not enabled */
3415 		return;
3416 	}
3417 
3418 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3419 	if (m_notify == NULL) {
3420 		/* no space left */
3421 		return;
3422 	}
3423 	SCTP_BUF_LEN(m_notify) = 0;
3424 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3425 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3426 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3427 	event->sender_dry_flags = 0;
3428 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3429 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3430 
3431 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3432 	SCTP_BUF_NEXT(m_notify) = NULL;
3433 
3434 	/* append to socket */
3435 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3436 	    0, 0, stcb->asoc.context, 0, 0, 0,
3437 	    m_notify);
3438 	if (control == NULL) {
3439 		/* no memory */
3440 		sctp_m_freem(m_notify);
3441 		return;
3442 	}
3443 	control->length = SCTP_BUF_LEN(m_notify);
3444 	control->spec_flags = M_NOTIFICATION;
3445 	/* not that we need this */
3446 	control->tail_mbuf = m_notify;
3447 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3448 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3449 }
3450 
3451 
3452 void
3453 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3454 {
3455 	struct mbuf *m_notify;
3456 	struct sctp_queued_to_read *control;
3457 	struct sctp_stream_change_event *stradd;
3458 
3459 	if ((stcb == NULL) ||
3460 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3461 		/* event not enabled */
3462 		return;
3463 	}
3464 	if ((stcb->asoc.peer_req_out) && flag) {
3465 		/* Peer made the request, don't tell the local user */
3466 		stcb->asoc.peer_req_out = 0;
3467 		return;
3468 	}
3469 	stcb->asoc.peer_req_out = 0;
3470 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3471 	if (m_notify == NULL)
3472 		/* no space left */
3473 		return;
3474 	SCTP_BUF_LEN(m_notify) = 0;
3475 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3476 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3477 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3478 	stradd->strchange_flags = flag;
3479 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3480 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3481 	stradd->strchange_instrms = numberin;
3482 	stradd->strchange_outstrms = numberout;
3483 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3484 	SCTP_BUF_NEXT(m_notify) = NULL;
3485 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3486 		/* no space */
3487 		sctp_m_freem(m_notify);
3488 		return;
3489 	}
3490 	/* append to socket */
3491 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3492 	    0, 0, stcb->asoc.context, 0, 0, 0,
3493 	    m_notify);
3494 	if (control == NULL) {
3495 		/* no memory */
3496 		sctp_m_freem(m_notify);
3497 		return;
3498 	}
3499 	control->length = SCTP_BUF_LEN(m_notify);
3500 	control->spec_flags = M_NOTIFICATION;
3501 	/* not that we need this */
3502 	control->tail_mbuf = m_notify;
3503 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3504 	    control,
3505 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3506 }
3507 
3508 void
3509 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3510 {
3511 	struct mbuf *m_notify;
3512 	struct sctp_queued_to_read *control;
3513 	struct sctp_assoc_reset_event *strasoc;
3514 
3515 	if ((stcb == NULL) ||
3516 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3517 		/* event not enabled */
3518 		return;
3519 	}
3520 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3521 	if (m_notify == NULL)
3522 		/* no space left */
3523 		return;
3524 	SCTP_BUF_LEN(m_notify) = 0;
3525 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3526 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3527 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3528 	strasoc->assocreset_flags = flag;
3529 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3530 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3531 	strasoc->assocreset_local_tsn = sending_tsn;
3532 	strasoc->assocreset_remote_tsn = recv_tsn;
3533 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3534 	SCTP_BUF_NEXT(m_notify) = NULL;
3535 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3536 		/* no space */
3537 		sctp_m_freem(m_notify);
3538 		return;
3539 	}
3540 	/* append to socket */
3541 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3542 	    0, 0, stcb->asoc.context, 0, 0, 0,
3543 	    m_notify);
3544 	if (control == NULL) {
3545 		/* no memory */
3546 		sctp_m_freem(m_notify);
3547 		return;
3548 	}
3549 	control->length = SCTP_BUF_LEN(m_notify);
3550 	control->spec_flags = M_NOTIFICATION;
3551 	/* not that we need this */
3552 	control->tail_mbuf = m_notify;
3553 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3554 	    control,
3555 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3556 }
3557 
3558 
3559 
3560 static void
3561 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3562     int number_entries, uint16_t *list, int flag)
3563 {
3564 	struct mbuf *m_notify;
3565 	struct sctp_queued_to_read *control;
3566 	struct sctp_stream_reset_event *strreset;
3567 	int len;
3568 
3569 	if ((stcb == NULL) ||
3570 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3571 		/* event not enabled */
3572 		return;
3573 	}
3574 
3575 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3576 	if (m_notify == NULL)
3577 		/* no space left */
3578 		return;
3579 	SCTP_BUF_LEN(m_notify) = 0;
3580 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3581 	if (len > M_TRAILINGSPACE(m_notify)) {
3582 		/* never enough room */
3583 		sctp_m_freem(m_notify);
3584 		return;
3585 	}
3586 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3587 	memset(strreset, 0, len);
3588 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3589 	strreset->strreset_flags = flag;
3590 	strreset->strreset_length = len;
3591 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3592 	if (number_entries) {
3593 		int i;
3594 
3595 		for (i = 0; i < number_entries; i++) {
3596 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3597 		}
3598 	}
3599 	SCTP_BUF_LEN(m_notify) = len;
3600 	SCTP_BUF_NEXT(m_notify) = NULL;
3601 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3602 		/* no space */
3603 		sctp_m_freem(m_notify);
3604 		return;
3605 	}
3606 	/* append to socket */
3607 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3608 	    0, 0, stcb->asoc.context, 0, 0, 0,
3609 	    m_notify);
3610 	if (control == NULL) {
3611 		/* no memory */
3612 		sctp_m_freem(m_notify);
3613 		return;
3614 	}
3615 	control->length = SCTP_BUF_LEN(m_notify);
3616 	control->spec_flags = M_NOTIFICATION;
3617 	/* not that we need this */
3618 	control->tail_mbuf = m_notify;
3619 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3620 	    control,
3621 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3622 }
3623 
3624 
/*
 * Queue an SCTP_REMOTE_ERROR notification for an ERROR chunk received
 * from the peer.  Up to SCTP_CHUNK_BUFFER_SIZE bytes of the chunk are
 * copied after the notification header; if the big allocation fails,
 * fall back to a header-only notification.
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t chunk_len;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		/* event not enabled */
		return;
	}
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
		/*
		 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
		 * contiguous.
		 */
		if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
			chunk_len = SCTP_CHUNK_BUFFER_SIZE;
		}
	} else {
		chunk_len = 0;
	}
	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	memset(sre, 0, notif_len);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	/*
	 * Only the big allocation has room for the chunk copy; after the
	 * small-allocation fallback, notif_len equals the header size and
	 * this branch is skipped (chunk is non-NULL whenever chunk_len > 0).
	 */
	if (notif_len > sizeof(struct sctp_remote_error)) {
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		control->spec_flags = M_NOTIFICATION;
		/* not that we need this */
		control->tail_mbuf = m_notify;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		/* no memory for the queue entry */
		sctp_m_freem(m_notify);
	}
}
3689 
3690 
/*
 * Central dispatcher for notifications to the upper layer (ULP).  Maps an
 * SCTP_NOTIFY_* code plus its opaque 'data' argument (whose actual type
 * depends on the notification) onto the specific sctp_notify_* helper.
 * Suppresses all notifications once the socket is gone/closed or can no
 * longer receive, and suppresses address events in the front (handshake)
 * states.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* receiver has shut down; nothing can be delivered */
		return;
	}
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is sent at most once per association */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			/* 'data' is the affected sctp_nets */
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			/* 'data' is the affected sctp_nets */
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			/* 'data' is the affected sctp_nets */
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* 'data' is a stream-queue-pending entry (never sent) */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		/* 'data' is the failed tmit chunk */
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			/* 'data' points to a packed stream/seq uint32_t */
			uint32_t val;

			val = *((uint32_t *)data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* Front states map to CANT_STR_ASSOC instead of COMM_LOST */
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		/* for stream reset cases, 'error' carries the entry count */
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		/* for ASCONF cases, 'data' is the affected sockaddr */
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* for AUTH cases, 'data' carries the key number by value */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __func__, notification, notification);
		break;
	}			/* end switch */
}
3870 
/*
 * Report every chunk still queued for transmission on this association as
 * failed and free it.  Walks the sent queue, the pending send queue, and
 * every stream's output queue, issuing SCTP_NOTIFY_SENT_DG_FAIL /
 * SCTP_NOTIFY_UNSENT_DG_FAIL / SCTP_NOTIFY_SPECIAL_SP_FAIL notifications
 * carrying the supplied error code.  A no-op when the association or its
 * socket is already being torn down.  'holds_lock' is non-zero when the
 * caller already owns the TCB send lock; 'so_locked' is forwarded to the
 * notification and free routines.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	/* Socket gone or closed: nobody left to notify. */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* keep the per-stream queued-chunk count in sync */
			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* re-check: the notify path may have consumed the mbuf */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* re-check: the notify path may have consumed the mbuf */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			/* tell the stream scheduler this entry is gone */
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* re-check: the notify path may have consumed the mbuf */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3982 
3983 void
3984 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3985     struct sctp_abort_chunk *abort, int so_locked
3986 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3987     SCTP_UNUSED
3988 #endif
3989 )
3990 {
3991 	if (stcb == NULL) {
3992 		return;
3993 	}
3994 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3995 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3996 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3997 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3998 	}
3999 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4000 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4001 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
4002 		return;
4003 	}
4004 	/* Tell them we lost the asoc */
4005 	sctp_report_all_outbound(stcb, error, 0, so_locked);
4006 	if (from_peer) {
4007 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
4008 	} else {
4009 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
4010 	}
4011 }
4012 
/*
 * Send an ABORT in response to the given packet — using the peer's vtag
 * and VRF when a TCB exists — and, if a TCB was supplied, notify the
 * ULP and free the association.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	vtag = 0;
	if (stcb != NULL) {
		/* Aborting a known association: use its vtag and VRF. */
		vtag = stcb->asoc.peer_vtag;
		vrf_id = stcb->asoc.vrf_id;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Hold a reference so the TCB survives while its lock is
		 * dropped to take the socket lock in the right order.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* An established association is going away. */
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
4059 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Dump the association's inbound and outbound TSN tracking logs to the
 * console.  NOTE(review): the body is guarded by "NOSIY_PRINTS" (sic) —
 * this looks like a typo of NOISY_PRINTS; unless that exact macro is
 * defined the function compiles to an empty stub.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* When wrapped, the oldest entries run from tsn_in_at to the end. */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	/* Then the newer entries from the start of the ring to tsn_in_at. */
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
4120 #endif
4121 
/*
 * Abort an association: send an ABORT chunk carrying op_err to the peer,
 * notify the ULP (unless the socket is already gone), and free the
 * association.  With no TCB, only finish tearing down the endpoint if it
 * is already marked gone and has no associations left.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* An established association is going away. */
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Hold a reference while dropping the TCB lock so the socket lock
	 * can be taken in the correct order.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4182 
4183 void
4184 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4185     struct sockaddr *src, struct sockaddr *dst,
4186     struct sctphdr *sh, struct sctp_inpcb *inp,
4187     struct mbuf *cause,
4188     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4189     uint32_t vrf_id, uint16_t port)
4190 {
4191 	struct sctp_chunkhdr *ch, chunk_buf;
4192 	unsigned int chk_length;
4193 	int contains_init_chunk;
4194 
4195 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4196 	/* Generate a TO address for future reference */
4197 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4198 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4199 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4200 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4201 		}
4202 	}
4203 	contains_init_chunk = 0;
4204 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4205 	    sizeof(*ch), (uint8_t *)&chunk_buf);
4206 	while (ch != NULL) {
4207 		chk_length = ntohs(ch->chunk_length);
4208 		if (chk_length < sizeof(*ch)) {
4209 			/* break to abort land */
4210 			break;
4211 		}
4212 		switch (ch->chunk_type) {
4213 		case SCTP_INIT:
4214 			contains_init_chunk = 1;
4215 			break;
4216 		case SCTP_PACKET_DROPPED:
4217 			/* we don't respond to pkt-dropped */
4218 			return;
4219 		case SCTP_ABORT_ASSOCIATION:
4220 			/* we don't respond with an ABORT to an ABORT */
4221 			return;
4222 		case SCTP_SHUTDOWN_COMPLETE:
4223 			/*
4224 			 * we ignore it since we are not waiting for it and
4225 			 * peer is gone
4226 			 */
4227 			return;
4228 		case SCTP_SHUTDOWN_ACK:
4229 			sctp_send_shutdown_complete2(src, dst, sh,
4230 			    mflowtype, mflowid, fibnum,
4231 			    vrf_id, port);
4232 			return;
4233 		default:
4234 			break;
4235 		}
4236 		offset += SCTP_SIZE32(chk_length);
4237 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4238 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4239 	}
4240 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4241 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4242 	    (contains_init_chunk == 0))) {
4243 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4244 		    mflowtype, mflowid, fibnum,
4245 		    vrf_id, port);
4246 	}
4247 }
4248 
4249 /*
4250  * check the inbound datagram to make sure there is not an abort inside it,
4251  * if there is return 1, else return 0.
4252  */
4253 int
4254 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4255 {
4256 	struct sctp_chunkhdr *ch;
4257 	struct sctp_init_chunk *init_chk, chunk_buf;
4258 	int offset;
4259 	unsigned int chk_length;
4260 
4261 	offset = iphlen + sizeof(struct sctphdr);
4262 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4263 	    (uint8_t *)&chunk_buf);
4264 	while (ch != NULL) {
4265 		chk_length = ntohs(ch->chunk_length);
4266 		if (chk_length < sizeof(*ch)) {
4267 			/* packet is probably corrupt */
4268 			break;
4269 		}
4270 		/* we seem to be ok, is it an abort? */
4271 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4272 			/* yep, tell them */
4273 			return (1);
4274 		}
4275 		if (ch->chunk_type == SCTP_INITIATION) {
4276 			/* need to update the Vtag */
4277 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4278 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4279 			if (init_chk != NULL) {
4280 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4281 			}
4282 		}
4283 		/* Nope, move to the next chunk */
4284 		offset += SCTP_SIZE32(chk_length);
4285 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4286 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4287 	}
4288 	return (0);
4289 }
4290 
4291 /*
4292  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4293  * set (i.e. it's 0) so, create this function to compare link local scopes
4294  */
4295 #ifdef INET6
4296 uint32_t
4297 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4298 {
4299 	struct sockaddr_in6 a, b;
4300 
4301 	/* save copies */
4302 	a = *addr1;
4303 	b = *addr2;
4304 
4305 	if (a.sin6_scope_id == 0)
4306 		if (sa6_recoverscope(&a)) {
4307 			/* can't get scope, so can't match */
4308 			return (0);
4309 		}
4310 	if (b.sin6_scope_id == 0)
4311 		if (sa6_recoverscope(&b)) {
4312 			/* can't get scope, so can't match */
4313 			return (0);
4314 		}
4315 	if (a.sin6_scope_id != b.sin6_scope_id)
4316 		return (0);
4317 
4318 	return (1);
4319 }
4320 
4321 /*
4322  * returns a sockaddr_in6 with embedded scope recovered and removed
4323  */
4324 struct sockaddr_in6 *
4325 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4326 {
4327 	/* check and strip embedded scope junk */
4328 	if (addr->sin6_family == AF_INET6) {
4329 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4330 			if (addr->sin6_scope_id == 0) {
4331 				*store = *addr;
4332 				if (!sa6_recoverscope(store)) {
4333 					/* use the recovered scope */
4334 					addr = store;
4335 				}
4336 			} else {
4337 				/* else, return the original "to" addr */
4338 				in6_clearscope(&addr->sin6_addr);
4339 			}
4340 		}
4341 	}
4342 	return (addr);
4343 }
4344 #endif
4345 
4346 /*
4347  * are the two addresses the same?  currently a "scopeless" check returns: 1
4348  * if same, 0 if not
4349  */
4350 int
4351 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4352 {
4353 
4354 	/* must be valid */
4355 	if (sa1 == NULL || sa2 == NULL)
4356 		return (0);
4357 
4358 	/* must be the same family */
4359 	if (sa1->sa_family != sa2->sa_family)
4360 		return (0);
4361 
4362 	switch (sa1->sa_family) {
4363 #ifdef INET6
4364 	case AF_INET6:
4365 		{
4366 			/* IPv6 addresses */
4367 			struct sockaddr_in6 *sin6_1, *sin6_2;
4368 
4369 			sin6_1 = (struct sockaddr_in6 *)sa1;
4370 			sin6_2 = (struct sockaddr_in6 *)sa2;
4371 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4372 			    sin6_2));
4373 		}
4374 #endif
4375 #ifdef INET
4376 	case AF_INET:
4377 		{
4378 			/* IPv4 addresses */
4379 			struct sockaddr_in *sin_1, *sin_2;
4380 
4381 			sin_1 = (struct sockaddr_in *)sa1;
4382 			sin_2 = (struct sockaddr_in *)sa2;
4383 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4384 		}
4385 #endif
4386 	default:
4387 		/* we don't do these... */
4388 		return (0);
4389 	}
4390 }
4391 
4392 void
4393 sctp_print_address(struct sockaddr *sa)
4394 {
4395 #ifdef INET6
4396 	char ip6buf[INET6_ADDRSTRLEN];
4397 #endif
4398 
4399 	switch (sa->sa_family) {
4400 #ifdef INET6
4401 	case AF_INET6:
4402 		{
4403 			struct sockaddr_in6 *sin6;
4404 
4405 			sin6 = (struct sockaddr_in6 *)sa;
4406 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4407 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4408 			    ntohs(sin6->sin6_port),
4409 			    sin6->sin6_scope_id);
4410 			break;
4411 		}
4412 #endif
4413 #ifdef INET
4414 	case AF_INET:
4415 		{
4416 			struct sockaddr_in *sin;
4417 			unsigned char *p;
4418 
4419 			sin = (struct sockaddr_in *)sa;
4420 			p = (unsigned char *)&sin->sin_addr;
4421 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4422 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4423 			break;
4424 		}
4425 #endif
4426 	default:
4427 		SCTP_PRINTF("?\n");
4428 		break;
4429 	}
4430 }
4431 
/*
 * Move every read-queue entry belonging to 'stcb' from the old
 * endpoint's socket to the new one (peeloff/accept path).  Socket
 * buffer accounting is debited from the old socket and credited to the
 * new one, mbuf by mbuf.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for our target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit each mbuf from the old socket's rcv buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit each mbuf to the new socket's rcv buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4507 
/*
 * Wake up any reader sleeping on the endpoint's socket receive buffer.
 * On platforms that require the socket lock (Apple / lock testing), the
 * TCB lock is dropped and re-taken around acquiring it, with a
 * reference held so the TCB cannot go away meanwhile.
 */
void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(inp);
		if (!so_locked) {
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			/* socket disappeared while we relocked: nothing to do */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
4546 
/*
 * Append 'control' to the endpoint's read queue and charge its mbuf
 * chain to the socket buffer 'sb' so select/poll and the read wakeup
 * work.  Zero-length mbufs are stripped from the chain first.  'end'
 * marks the message as complete (end_added).  If the socket can no
 * longer be read from, the control and its data are freed instead
 * (unless the control still sits on a stream queue).
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* reader is gone: discard instead of queueing */
		if (!control->on_strm_q) {
			sctp_free_remote_addr(control->whoFrom);
			if (control->data) {
				sctp_m_freem(control->data);
				control->data = NULL;
			}
			sctp_free_a_readq(stcb, control);
		}
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* notifications are not counted as received user messages */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		/* charge this mbuf to the socket buffer */
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		if (!control->on_strm_q) {
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
}
4648 
4649 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4650  *************ALTERNATE ROUTING CODE
4651  */
4652 
4653 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4654  *************ALTERNATE ROUTING CODE
4655  */
4656 
4657 struct mbuf *
4658 sctp_generate_cause(uint16_t code, char *info)
4659 {
4660 	struct mbuf *m;
4661 	struct sctp_gen_error_cause *cause;
4662 	size_t info_len;
4663 	uint16_t len;
4664 
4665 	if ((code == 0) || (info == NULL)) {
4666 		return (NULL);
4667 	}
4668 	info_len = strlen(info);
4669 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4670 		return (NULL);
4671 	}
4672 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4673 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4674 	if (m != NULL) {
4675 		SCTP_BUF_LEN(m) = len;
4676 		cause = mtod(m, struct sctp_gen_error_cause *);
4677 		cause->code = htons(code);
4678 		cause->length = htons(len);
4679 		memcpy(cause->info, info, info_len);
4680 	}
4681 	return (m);
4682 }
4683 
4684 struct mbuf *
4685 sctp_generate_no_user_data_cause(uint32_t tsn)
4686 {
4687 	struct mbuf *m;
4688 	struct sctp_error_no_user_data *no_user_data_cause;
4689 	uint16_t len;
4690 
4691 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4692 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4693 	if (m != NULL) {
4694 		SCTP_BUF_LEN(m) = len;
4695 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4696 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4697 		no_user_data_cause->cause.length = htons(len);
4698 		no_user_data_cause->tsn = htonl(tsn);
4699 	}
4700 	return (m);
4701 }
4702 
4703 #ifdef SCTP_MBCNT_LOGGING
4704 void
4705 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4706     struct sctp_tmit_chunk *tp1, int chk_cnt)
4707 {
4708 	if (tp1->data == NULL) {
4709 		return;
4710 	}
4711 	asoc->chunks_on_out_queue -= chk_cnt;
4712 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4713 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4714 		    asoc->total_output_queue_size,
4715 		    tp1->book_size,
4716 		    0,
4717 		    tp1->mbcnt);
4718 	}
4719 	if (asoc->total_output_queue_size >= tp1->book_size) {
4720 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4721 	} else {
4722 		asoc->total_output_queue_size = 0;
4723 	}
4724 
4725 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4726 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4727 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4728 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4729 		} else {
4730 			stcb->sctp_socket->so_snd.sb_cc = 0;
4731 
4732 		}
4733 	}
4734 }
4735 
4736 #endif
4737 
4738 int
4739 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4740     uint8_t sent, int so_locked
4741 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4742     SCTP_UNUSED
4743 #endif
4744 )
4745 {
4746 	struct sctp_stream_out *strq;
4747 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4748 	struct sctp_stream_queue_pending *sp;
4749 	uint32_t mid;
4750 	uint16_t sid;
4751 	uint8_t foundeom = 0;
4752 	int ret_sz = 0;
4753 	int notdone;
4754 	int do_wakeup_routine = 0;
4755 
4756 	sid = tp1->rec.data.sid;
4757 	mid = tp1->rec.data.mid;
4758 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4759 		stcb->asoc.abandoned_sent[0]++;
4760 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4761 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
4762 #if defined(SCTP_DETAILED_STR_STATS)
4763 		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4764 #endif
4765 	} else {
4766 		stcb->asoc.abandoned_unsent[0]++;
4767 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4768 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4769 #if defined(SCTP_DETAILED_STR_STATS)
4770 		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4771 #endif
4772 	}
4773 	do {
4774 		ret_sz += tp1->book_size;
4775 		if (tp1->data != NULL) {
4776 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4777 				sctp_flight_size_decrease(tp1);
4778 				sctp_total_flight_decrease(stcb, tp1);
4779 			}
4780 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4781 			stcb->asoc.peers_rwnd += tp1->send_size;
4782 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4783 			if (sent) {
4784 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4785 			} else {
4786 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4787 			}
4788 			if (tp1->data) {
4789 				sctp_m_freem(tp1->data);
4790 				tp1->data = NULL;
4791 			}
4792 			do_wakeup_routine = 1;
4793 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4794 				stcb->asoc.sent_queue_cnt_removeable--;
4795 			}
4796 		}
4797 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4798 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4799 		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed, we are done */
4801 			notdone = 0;
4802 			foundeom = 1;
4803 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4804 			/* end of frag, we are done */
4805 			notdone = 0;
4806 			foundeom = 1;
4807 		} else {
4808 			/*
4809 			 * Its a begin or middle piece, we must mark all of
4810 			 * it
4811 			 */
4812 			notdone = 1;
4813 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4814 		}
4815 	} while (tp1 && notdone);
4816 	if (foundeom == 0) {
4817 		/*
4818 		 * The multi-part message was scattered across the send and
4819 		 * sent queue.
4820 		 */
4821 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4822 			if ((tp1->rec.data.sid != sid) ||
4823 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4824 				break;
4825 			}
4826 			/*
4827 			 * save to chk in case we have some on stream out
4828 			 * queue. If so and we have an un-transmitted one we
4829 			 * don't have to fudge the TSN.
4830 			 */
4831 			chk = tp1;
4832 			ret_sz += tp1->book_size;
4833 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4834 			if (sent) {
4835 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4836 			} else {
4837 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4838 			}
4839 			if (tp1->data) {
4840 				sctp_m_freem(tp1->data);
4841 				tp1->data = NULL;
4842 			}
4843 			/* No flight involved here book the size to 0 */
4844 			tp1->book_size = 0;
4845 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4846 				foundeom = 1;
4847 			}
4848 			do_wakeup_routine = 1;
4849 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4850 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4851 			/*
4852 			 * on to the sent queue so we can wait for it to be
4853 			 * passed by.
4854 			 */
4855 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4856 			    sctp_next);
4857 			stcb->asoc.send_queue_cnt--;
4858 			stcb->asoc.sent_queue_cnt++;
4859 		}
4860 	}
4861 	if (foundeom == 0) {
4862 		/*
4863 		 * Still no eom found. That means there is stuff left on the
4864 		 * stream out queue.. yuck.
4865 		 */
4866 		SCTP_TCB_SEND_LOCK(stcb);
4867 		strq = &stcb->asoc.strmout[sid];
4868 		sp = TAILQ_FIRST(&strq->outqueue);
4869 		if (sp != NULL) {
4870 			sp->discard_rest = 1;
4871 			/*
4872 			 * We may need to put a chunk on the queue that
4873 			 * holds the TSN that would have been sent with the
4874 			 * LAST bit.
4875 			 */
4876 			if (chk == NULL) {
4877 				/* Yep, we have to */
4878 				sctp_alloc_a_chunk(stcb, chk);
4879 				if (chk == NULL) {
4880 					/*
4881 					 * we are hosed. All we can do is
4882 					 * nothing.. which will cause an
4883 					 * abort if the peer is paying
4884 					 * attention.
4885 					 */
4886 					goto oh_well;
4887 				}
4888 				memset(chk, 0, sizeof(*chk));
4889 				chk->rec.data.rcv_flags = 0;
4890 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4891 				chk->asoc = &stcb->asoc;
4892 				if (stcb->asoc.idata_supported == 0) {
4893 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4894 						chk->rec.data.mid = 0;
4895 					} else {
4896 						chk->rec.data.mid = strq->next_mid_ordered;
4897 					}
4898 				} else {
4899 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4900 						chk->rec.data.mid = strq->next_mid_unordered;
4901 					} else {
4902 						chk->rec.data.mid = strq->next_mid_ordered;
4903 					}
4904 				}
4905 				chk->rec.data.sid = sp->sid;
4906 				chk->rec.data.ppid = sp->ppid;
4907 				chk->rec.data.context = sp->context;
4908 				chk->flags = sp->act_flags;
4909 				chk->whoTo = NULL;
4910 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4911 				strq->chunks_on_queues++;
4912 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4913 				stcb->asoc.sent_queue_cnt++;
4914 				stcb->asoc.pr_sctp_cnt++;
4915 			}
4916 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4917 			if (sp->sinfo_flags & SCTP_UNORDERED) {
4918 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4919 			}
4920 			if (stcb->asoc.idata_supported == 0) {
4921 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4922 					strq->next_mid_ordered++;
4923 				}
4924 			} else {
4925 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4926 					strq->next_mid_unordered++;
4927 				} else {
4928 					strq->next_mid_ordered++;
4929 				}
4930 			}
4931 	oh_well:
4932 			if (sp->data) {
4933 				/*
4934 				 * Pull any data to free up the SB and allow
4935 				 * sender to "add more" while we will throw
4936 				 * away :-)
4937 				 */
4938 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4939 				ret_sz += sp->length;
4940 				do_wakeup_routine = 1;
4941 				sp->some_taken = 1;
4942 				sctp_m_freem(sp->data);
4943 				sp->data = NULL;
4944 				sp->tail_mbuf = NULL;
4945 				sp->length = 0;
4946 			}
4947 		}
4948 		SCTP_TCB_SEND_UNLOCK(stcb);
4949 	}
4950 	if (do_wakeup_routine) {
4951 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4952 		struct socket *so;
4953 
4954 		so = SCTP_INP_SO(stcb->sctp_ep);
4955 		if (!so_locked) {
4956 			atomic_add_int(&stcb->asoc.refcnt, 1);
4957 			SCTP_TCB_UNLOCK(stcb);
4958 			SCTP_SOCKET_LOCK(so, 1);
4959 			SCTP_TCB_LOCK(stcb);
4960 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4961 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4962 				/* assoc was freed while we were unlocked */
4963 				SCTP_SOCKET_UNLOCK(so, 1);
4964 				return (ret_sz);
4965 			}
4966 		}
4967 #endif
4968 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4969 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4970 		if (!so_locked) {
4971 			SCTP_SOCKET_UNLOCK(so, 1);
4972 		}
4973 #endif
4974 	}
4975 	return (ret_sz);
4976 }
4977 
4978 /*
4979  * checks to see if the given address, sa, is one that is currently known by
4980  * the kernel note: can't distinguish the same address on multiple interfaces
4981  * and doesn't handle multiple addresses with different zone/scope id's note:
4982  * ifa_ifwithaddr() compares the entire sockaddr struct
4983  */
4984 struct sctp_ifa *
4985 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4986     int holds_lock)
4987 {
4988 	struct sctp_laddr *laddr;
4989 
4990 	if (holds_lock == 0) {
4991 		SCTP_INP_RLOCK(inp);
4992 	}
4993 
4994 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4995 		if (laddr->ifa == NULL)
4996 			continue;
4997 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4998 			continue;
4999 #ifdef INET
5000 		if (addr->sa_family == AF_INET) {
5001 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5002 			    laddr->ifa->address.sin.sin_addr.s_addr) {
5003 				/* found him. */
5004 				if (holds_lock == 0) {
5005 					SCTP_INP_RUNLOCK(inp);
5006 				}
5007 				return (laddr->ifa);
5008 				break;
5009 			}
5010 		}
5011 #endif
5012 #ifdef INET6
5013 		if (addr->sa_family == AF_INET6) {
5014 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5015 			    &laddr->ifa->address.sin6)) {
5016 				/* found him. */
5017 				if (holds_lock == 0) {
5018 					SCTP_INP_RUNLOCK(inp);
5019 				}
5020 				return (laddr->ifa);
5021 				break;
5022 			}
5023 		}
5024 #endif
5025 	}
5026 	if (holds_lock == 0) {
5027 		SCTP_INP_RUNLOCK(inp);
5028 	}
5029 	return (NULL);
5030 }
5031 
/*
 * Hash an address down to a 32-bit bucket value: fold the address words
 * together and XOR the upper half into the lower half.  Unknown address
 * families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			uint32_t a4;

			a4 = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
			return (a4 ^ (a4 >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *a6;
			uint32_t w[4];
			uint32_t folded;
			int i;

			a6 = (struct sockaddr_in6 *)addr;
			/*
			 * Read the 128-bit address as four native-order
			 * 32-bit words (the same bytes the s6_addr32
			 * overlay exposes) and sum them.
			 */
			memcpy(w, &a6->sin6_addr, sizeof(w));
			folded = 0;
			for (i = 0; i < 4; i++) {
				folded += w[i];
			}
			return (folded ^ (folded >> 16));
		}
#endif
	default:
		break;
	}
	return (0);
}
5065 
5066 struct sctp_ifa *
5067 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5068 {
5069 	struct sctp_ifa *sctp_ifap;
5070 	struct sctp_vrf *vrf;
5071 	struct sctp_ifalist *hash_head;
5072 	uint32_t hash_of_addr;
5073 
5074 	if (holds_lock == 0)
5075 		SCTP_IPI_ADDR_RLOCK();
5076 
5077 	vrf = sctp_find_vrf(vrf_id);
5078 	if (vrf == NULL) {
5079 		if (holds_lock == 0)
5080 			SCTP_IPI_ADDR_RUNLOCK();
5081 		return (NULL);
5082 	}
5083 
5084 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5085 
5086 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5087 	if (hash_head == NULL) {
5088 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5089 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5090 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5091 		sctp_print_address(addr);
5092 		SCTP_PRINTF("No such bucket for address\n");
5093 		if (holds_lock == 0)
5094 			SCTP_IPI_ADDR_RUNLOCK();
5095 
5096 		return (NULL);
5097 	}
5098 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5099 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5100 			continue;
5101 #ifdef INET
5102 		if (addr->sa_family == AF_INET) {
5103 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5104 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5105 				/* found him. */
5106 				if (holds_lock == 0)
5107 					SCTP_IPI_ADDR_RUNLOCK();
5108 				return (sctp_ifap);
5109 				break;
5110 			}
5111 		}
5112 #endif
5113 #ifdef INET6
5114 		if (addr->sa_family == AF_INET6) {
5115 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5116 			    &sctp_ifap->address.sin6)) {
5117 				/* found him. */
5118 				if (holds_lock == 0)
5119 					SCTP_IPI_ADDR_RUNLOCK();
5120 				return (sctp_ifap);
5121 				break;
5122 			}
5123 		}
5124 #endif
5125 	}
5126 	if (holds_lock == 0)
5127 		SCTP_IPI_ADDR_RUNLOCK();
5128 	return (NULL);
5129 }
5130 
/*
 * Called after the user has consumed data from the receive path to decide
 * whether the receive window has opened far enough (by at least rwnd_req)
 * to warrant sending a window-update SACK.  *freed_so_far is the byte
 * count consumed since the last call; it is folded into the tcb and reset.
 * hold_rlock indicates the caller holds the INP read lock; it is dropped
 * around the TCB lock acquisition and re-taken before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the association so it cannot be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Also pin the endpoint; released at "out". */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Accumulate the newly freed bytes onto the per-tcb running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	/* dif = how far the window has opened since we last told the peer. */
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * Worth reporting: drop the INP read lock (if held) before
		 * taking the TCB lock, and re-take it at "out".
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock; the race may have closed. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		/* Send the window-update SACK and push any queued output. */
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-acquire the caller's INP read lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}

	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5213 
5214 int
5215 sctp_sorecvmsg(struct socket *so,
5216     struct uio *uio,
5217     struct mbuf **mp,
5218     struct sockaddr *from,
5219     int fromlen,
5220     int *msg_flags,
5221     struct sctp_sndrcvinfo *sinfo,
5222     int filling_sinfo)
5223 {
5224 	/*
5225 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5226 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5227 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5228 	 * On the way out we may send out any combination of:
5229 	 * MSG_NOTIFICATION MSG_EOR
5230 	 *
5231 	 */
5232 	struct sctp_inpcb *inp = NULL;
5233 	ssize_t my_len = 0;
5234 	ssize_t cp_len = 0;
5235 	int error = 0;
5236 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5237 	struct mbuf *m = NULL;
5238 	struct sctp_tcb *stcb = NULL;
5239 	int wakeup_read_socket = 0;
5240 	int freecnt_applied = 0;
5241 	int out_flags = 0, in_flags = 0;
5242 	int block_allowed = 1;
5243 	uint32_t freed_so_far = 0;
5244 	ssize_t copied_so_far = 0;
5245 	int in_eeor_mode = 0;
5246 	int no_rcv_needed = 0;
5247 	uint32_t rwnd_req = 0;
5248 	int hold_sblock = 0;
5249 	int hold_rlock = 0;
5250 	ssize_t slen = 0;
5251 	uint32_t held_length = 0;
5252 	int sockbuf_lock = 0;
5253 
5254 	if (uio == NULL) {
5255 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5256 		return (EINVAL);
5257 	}
5258 
5259 	if (msg_flags) {
5260 		in_flags = *msg_flags;
5261 		if (in_flags & MSG_PEEK)
5262 			SCTP_STAT_INCR(sctps_read_peeks);
5263 	} else {
5264 		in_flags = 0;
5265 	}
5266 	slen = uio->uio_resid;
5267 
5268 	/* Pull in and set up our int flags */
5269 	if (in_flags & MSG_OOB) {
5270 		/* Out of band's NOT supported */
5271 		return (EOPNOTSUPP);
5272 	}
5273 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5274 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5275 		return (EINVAL);
5276 	}
5277 	if ((in_flags & (MSG_DONTWAIT
5278 	    | MSG_NBIO
5279 	    )) ||
5280 	    SCTP_SO_IS_NBIO(so)) {
5281 		block_allowed = 0;
5282 	}
5283 	/* setup the endpoint */
5284 	inp = (struct sctp_inpcb *)so->so_pcb;
5285 	if (inp == NULL) {
5286 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5287 		return (EFAULT);
5288 	}
5289 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5290 	/* Must be at least a MTU's worth */
5291 	if (rwnd_req < SCTP_MIN_RWND)
5292 		rwnd_req = SCTP_MIN_RWND;
5293 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5294 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5295 		sctp_misc_ints(SCTP_SORECV_ENTER,
5296 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5297 	}
5298 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5299 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5300 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5301 	}
5302 
5303 
5304 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5305 	if (error) {
5306 		goto release_unlocked;
5307 	}
5308 	sockbuf_lock = 1;
5309 restart:
5310 
5311 
5312 restart_nosblocks:
5313 	if (hold_sblock == 0) {
5314 		SOCKBUF_LOCK(&so->so_rcv);
5315 		hold_sblock = 1;
5316 	}
5317 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5318 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5319 		goto out;
5320 	}
5321 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5322 		if (so->so_error) {
5323 			error = so->so_error;
5324 			if ((in_flags & MSG_PEEK) == 0)
5325 				so->so_error = 0;
5326 			goto out;
5327 		} else {
5328 			if (so->so_rcv.sb_cc == 0) {
5329 				/* indicate EOF */
5330 				error = 0;
5331 				goto out;
5332 			}
5333 		}
5334 	}
5335 	if (so->so_rcv.sb_cc <= held_length) {
5336 		if (so->so_error) {
5337 			error = so->so_error;
5338 			if ((in_flags & MSG_PEEK) == 0) {
5339 				so->so_error = 0;
5340 			}
5341 			goto out;
5342 		}
5343 		if ((so->so_rcv.sb_cc == 0) &&
5344 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5345 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5346 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5347 				/*
5348 				 * For active open side clear flags for
5349 				 * re-use passive open is blocked by
5350 				 * connect.
5351 				 */
5352 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5353 					/*
5354 					 * You were aborted, passive side
5355 					 * always hits here
5356 					 */
5357 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5358 					error = ECONNRESET;
5359 				}
5360 				so->so_state &= ~(SS_ISCONNECTING |
5361 				    SS_ISDISCONNECTING |
5362 				    SS_ISCONFIRMING |
5363 				    SS_ISCONNECTED);
5364 				if (error == 0) {
5365 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5366 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5367 						error = ENOTCONN;
5368 					}
5369 				}
5370 				goto out;
5371 			}
5372 		}
5373 		if (block_allowed) {
5374 			error = sbwait(&so->so_rcv);
5375 			if (error) {
5376 				goto out;
5377 			}
5378 			held_length = 0;
5379 			goto restart_nosblocks;
5380 		} else {
5381 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5382 			error = EWOULDBLOCK;
5383 			goto out;
5384 		}
5385 	}
5386 	if (hold_sblock == 1) {
5387 		SOCKBUF_UNLOCK(&so->so_rcv);
5388 		hold_sblock = 0;
5389 	}
5390 	/* we possibly have data we can read */
5391 	/* sa_ignore FREED_MEMORY */
5392 	control = TAILQ_FIRST(&inp->read_queue);
5393 	if (control == NULL) {
5394 		/*
5395 		 * This could be happening since the appender did the
5396 		 * increment but as not yet did the tailq insert onto the
5397 		 * read_queue
5398 		 */
5399 		if (hold_rlock == 0) {
5400 			SCTP_INP_READ_LOCK(inp);
5401 		}
5402 		control = TAILQ_FIRST(&inp->read_queue);
5403 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5404 #ifdef INVARIANTS
5405 			panic("Huh, its non zero and nothing on control?");
5406 #endif
5407 			so->so_rcv.sb_cc = 0;
5408 		}
5409 		SCTP_INP_READ_UNLOCK(inp);
5410 		hold_rlock = 0;
5411 		goto restart;
5412 	}
5413 
5414 	if ((control->length == 0) &&
5415 	    (control->do_not_ref_stcb)) {
5416 		/*
5417 		 * Clean up code for freeing assoc that left behind a
5418 		 * pdapi.. maybe a peer in EEOR that just closed after
5419 		 * sending and never indicated a EOR.
5420 		 */
5421 		if (hold_rlock == 0) {
5422 			hold_rlock = 1;
5423 			SCTP_INP_READ_LOCK(inp);
5424 		}
5425 		control->held_length = 0;
5426 		if (control->data) {
5427 			/* Hmm there is data here .. fix */
5428 			struct mbuf *m_tmp;
5429 			int cnt = 0;
5430 
5431 			m_tmp = control->data;
5432 			while (m_tmp) {
5433 				cnt += SCTP_BUF_LEN(m_tmp);
5434 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5435 					control->tail_mbuf = m_tmp;
5436 					control->end_added = 1;
5437 				}
5438 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5439 			}
5440 			control->length = cnt;
5441 		} else {
5442 			/* remove it */
5443 			TAILQ_REMOVE(&inp->read_queue, control, next);
5444 			/* Add back any hiddend data */
5445 			sctp_free_remote_addr(control->whoFrom);
5446 			sctp_free_a_readq(stcb, control);
5447 		}
5448 		if (hold_rlock) {
5449 			hold_rlock = 0;
5450 			SCTP_INP_READ_UNLOCK(inp);
5451 		}
5452 		goto restart;
5453 	}
5454 	if ((control->length == 0) &&
5455 	    (control->end_added == 1)) {
5456 		/*
5457 		 * Do we also need to check for (control->pdapi_aborted ==
5458 		 * 1)?
5459 		 */
5460 		if (hold_rlock == 0) {
5461 			hold_rlock = 1;
5462 			SCTP_INP_READ_LOCK(inp);
5463 		}
5464 		TAILQ_REMOVE(&inp->read_queue, control, next);
5465 		if (control->data) {
5466 #ifdef INVARIANTS
5467 			panic("control->data not null but control->length == 0");
5468 #else
5469 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5470 			sctp_m_freem(control->data);
5471 			control->data = NULL;
5472 #endif
5473 		}
5474 		if (control->aux_data) {
5475 			sctp_m_free(control->aux_data);
5476 			control->aux_data = NULL;
5477 		}
5478 #ifdef INVARIANTS
5479 		if (control->on_strm_q) {
5480 			panic("About to free ctl:%p so:%p and its in %d",
5481 			    control, so, control->on_strm_q);
5482 		}
5483 #endif
5484 		sctp_free_remote_addr(control->whoFrom);
5485 		sctp_free_a_readq(stcb, control);
5486 		if (hold_rlock) {
5487 			hold_rlock = 0;
5488 			SCTP_INP_READ_UNLOCK(inp);
5489 		}
5490 		goto restart;
5491 	}
5492 	if (control->length == 0) {
5493 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5494 		    (filling_sinfo)) {
5495 			/* find a more suitable one then this */
5496 			ctl = TAILQ_NEXT(control, next);
5497 			while (ctl) {
5498 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5499 				    (ctl->some_taken ||
5500 				    (ctl->spec_flags & M_NOTIFICATION) ||
5501 				    ((ctl->do_not_ref_stcb == 0) &&
5502 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5503 				    ) {
5504 					/*-
5505 					 * If we have a different TCB next, and there is data
5506 					 * present. If we have already taken some (pdapi), OR we can
5507 					 * ref the tcb and no delivery as started on this stream, we
5508 					 * take it. Note we allow a notification on a different
5509 					 * assoc to be delivered..
5510 					 */
5511 					control = ctl;
5512 					goto found_one;
5513 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5514 					    (ctl->length) &&
5515 					    ((ctl->some_taken) ||
5516 					    ((ctl->do_not_ref_stcb == 0) &&
5517 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5518 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5519 					/*-
5520 					 * If we have the same tcb, and there is data present, and we
5521 					 * have the strm interleave feature present. Then if we have
5522 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5523 					 * not started a delivery for this stream, we can take it.
5524 					 * Note we do NOT allow a notificaiton on the same assoc to
5525 					 * be delivered.
5526 					 */
5527 					control = ctl;
5528 					goto found_one;
5529 				}
5530 				ctl = TAILQ_NEXT(ctl, next);
5531 			}
5532 		}
5533 		/*
5534 		 * if we reach here, not suitable replacement is available
5535 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5536 		 * into the our held count, and its time to sleep again.
5537 		 */
5538 		held_length = so->so_rcv.sb_cc;
5539 		control->held_length = so->so_rcv.sb_cc;
5540 		goto restart;
5541 	}
5542 	/* Clear the held length since there is something to read */
5543 	control->held_length = 0;
5544 found_one:
5545 	/*
5546 	 * If we reach here, control has a some data for us to read off.
5547 	 * Note that stcb COULD be NULL.
5548 	 */
5549 	if (hold_rlock == 0) {
5550 		hold_rlock = 1;
5551 		SCTP_INP_READ_LOCK(inp);
5552 	}
5553 	control->some_taken++;
5554 	stcb = control->stcb;
5555 	if (stcb) {
5556 		if ((control->do_not_ref_stcb == 0) &&
5557 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5558 			if (freecnt_applied == 0)
5559 				stcb = NULL;
5560 		} else if (control->do_not_ref_stcb == 0) {
5561 			/* you can't free it on me please */
5562 			/*
5563 			 * The lock on the socket buffer protects us so the
5564 			 * free code will stop. But since we used the
5565 			 * socketbuf lock and the sender uses the tcb_lock
5566 			 * to increment, we need to use the atomic add to
5567 			 * the refcnt
5568 			 */
5569 			if (freecnt_applied) {
5570 #ifdef INVARIANTS
5571 				panic("refcnt already incremented");
5572 #else
5573 				SCTP_PRINTF("refcnt already incremented?\n");
5574 #endif
5575 			} else {
5576 				atomic_add_int(&stcb->asoc.refcnt, 1);
5577 				freecnt_applied = 1;
5578 			}
5579 			/*
5580 			 * Setup to remember how much we have not yet told
5581 			 * the peer our rwnd has opened up. Note we grab the
5582 			 * value from the tcb from last time. Note too that
5583 			 * sack sending clears this when a sack is sent,
5584 			 * which is fine. Once we hit the rwnd_req, we then
5585 			 * will go to the sctp_user_rcvd() that will not
5586 			 * lock until it KNOWs it MUST send a WUP-SACK.
5587 			 */
5588 			freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast;
5589 			stcb->freed_by_sorcv_sincelast = 0;
5590 		}
5591 	}
5592 	if (stcb &&
5593 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5594 	    control->do_not_ref_stcb == 0) {
5595 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5596 	}
5597 
5598 	/* First lets get off the sinfo and sockaddr info */
5599 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5600 		sinfo->sinfo_stream = control->sinfo_stream;
5601 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5602 		sinfo->sinfo_flags = control->sinfo_flags;
5603 		sinfo->sinfo_ppid = control->sinfo_ppid;
5604 		sinfo->sinfo_context = control->sinfo_context;
5605 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5606 		sinfo->sinfo_tsn = control->sinfo_tsn;
5607 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5608 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5609 		nxt = TAILQ_NEXT(control, next);
5610 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5611 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5612 			struct sctp_extrcvinfo *s_extra;
5613 
5614 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5615 			if ((nxt) &&
5616 			    (nxt->length)) {
5617 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5618 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5619 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5620 				}
5621 				if (nxt->spec_flags & M_NOTIFICATION) {
5622 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5623 				}
5624 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5625 				s_extra->serinfo_next_length = nxt->length;
5626 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5627 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5628 				if (nxt->tail_mbuf != NULL) {
5629 					if (nxt->end_added) {
5630 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5631 					}
5632 				}
5633 			} else {
5634 				/*
5635 				 * we explicitly 0 this, since the memcpy
5636 				 * got some other things beyond the older
5637 				 * sinfo_ that is on the control's structure
5638 				 * :-D
5639 				 */
5640 				nxt = NULL;
5641 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5642 				s_extra->serinfo_next_aid = 0;
5643 				s_extra->serinfo_next_length = 0;
5644 				s_extra->serinfo_next_ppid = 0;
5645 				s_extra->serinfo_next_stream = 0;
5646 			}
5647 		}
5648 		/*
5649 		 * update off the real current cum-ack, if we have an stcb.
5650 		 */
5651 		if ((control->do_not_ref_stcb == 0) && stcb)
5652 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5653 		/*
5654 		 * mask off the high bits, we keep the actual chunk bits in
5655 		 * there.
5656 		 */
5657 		sinfo->sinfo_flags &= 0x00ff;
5658 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5659 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5660 		}
5661 	}
5662 #ifdef SCTP_ASOCLOG_OF_TSNS
5663 	{
5664 		int index, newindex;
5665 		struct sctp_pcbtsn_rlog *entry;
5666 
5667 		do {
5668 			index = inp->readlog_index;
5669 			newindex = index + 1;
5670 			if (newindex >= SCTP_READ_LOG_SIZE) {
5671 				newindex = 0;
5672 			}
5673 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5674 		entry = &inp->readlog[index];
5675 		entry->vtag = control->sinfo_assoc_id;
5676 		entry->strm = control->sinfo_stream;
5677 		entry->seq = (uint16_t)control->mid;
5678 		entry->sz = control->length;
5679 		entry->flgs = control->sinfo_flags;
5680 	}
5681 #endif
5682 	if ((fromlen > 0) && (from != NULL)) {
5683 		union sctp_sockstore store;
5684 		size_t len;
5685 
5686 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5687 #ifdef INET6
5688 		case AF_INET6:
5689 			len = sizeof(struct sockaddr_in6);
5690 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5691 			store.sin6.sin6_port = control->port_from;
5692 			break;
5693 #endif
5694 #ifdef INET
5695 		case AF_INET:
5696 #ifdef INET6
5697 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5698 				len = sizeof(struct sockaddr_in6);
5699 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5700 				    &store.sin6);
5701 				store.sin6.sin6_port = control->port_from;
5702 			} else {
5703 				len = sizeof(struct sockaddr_in);
5704 				store.sin = control->whoFrom->ro._l_addr.sin;
5705 				store.sin.sin_port = control->port_from;
5706 			}
5707 #else
5708 			len = sizeof(struct sockaddr_in);
5709 			store.sin = control->whoFrom->ro._l_addr.sin;
5710 			store.sin.sin_port = control->port_from;
5711 #endif
5712 			break;
5713 #endif
5714 		default:
5715 			len = 0;
5716 			break;
5717 		}
5718 		memcpy(from, &store, min((size_t)fromlen, len));
5719 #ifdef INET6
5720 		{
5721 			struct sockaddr_in6 lsa6, *from6;
5722 
5723 			from6 = (struct sockaddr_in6 *)from;
5724 			sctp_recover_scope_mac(from6, (&lsa6));
5725 		}
5726 #endif
5727 	}
5728 	if (hold_rlock) {
5729 		SCTP_INP_READ_UNLOCK(inp);
5730 		hold_rlock = 0;
5731 	}
5732 	if (hold_sblock) {
5733 		SOCKBUF_UNLOCK(&so->so_rcv);
5734 		hold_sblock = 0;
5735 	}
5736 	/* now copy out what data we can */
5737 	if (mp == NULL) {
5738 		/* copy out each mbuf in the chain up to length */
5739 get_more_data:
5740 		m = control->data;
5741 		while (m) {
5742 			/* Move out all we can */
5743 			cp_len = uio->uio_resid;
5744 			my_len = SCTP_BUF_LEN(m);
5745 			if (cp_len > my_len) {
5746 				/* not enough in this buf */
5747 				cp_len = my_len;
5748 			}
5749 			if (hold_rlock) {
5750 				SCTP_INP_READ_UNLOCK(inp);
5751 				hold_rlock = 0;
5752 			}
5753 			if (cp_len > 0)
5754 				error = uiomove(mtod(m, char *), (int)cp_len, uio);
5755 			/* re-read */
5756 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5757 				goto release;
5758 			}
5759 
5760 			if ((control->do_not_ref_stcb == 0) && stcb &&
5761 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5762 				no_rcv_needed = 1;
5763 			}
5764 			if (error) {
5765 				/* error we are out of here */
5766 				goto release;
5767 			}
5768 			SCTP_INP_READ_LOCK(inp);
5769 			hold_rlock = 1;
5770 			if (cp_len == SCTP_BUF_LEN(m)) {
5771 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5772 				    (control->end_added)) {
5773 					out_flags |= MSG_EOR;
5774 					if ((control->do_not_ref_stcb == 0) &&
5775 					    (control->stcb != NULL) &&
5776 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5777 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5778 				}
5779 				if (control->spec_flags & M_NOTIFICATION) {
5780 					out_flags |= MSG_NOTIFICATION;
5781 				}
5782 				/* we ate up the mbuf */
5783 				if (in_flags & MSG_PEEK) {
5784 					/* just looking */
5785 					m = SCTP_BUF_NEXT(m);
5786 					copied_so_far += cp_len;
5787 				} else {
5788 					/* dispose of the mbuf */
5789 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5790 						sctp_sblog(&so->so_rcv,
5791 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5792 					}
5793 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5794 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5795 						sctp_sblog(&so->so_rcv,
5796 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5797 					}
5798 					copied_so_far += cp_len;
5799 					freed_so_far += (uint32_t)cp_len;
5800 					freed_so_far += MSIZE;
5801 					atomic_subtract_int(&control->length, cp_len);
5802 					control->data = sctp_m_free(m);
5803 					m = control->data;
5804 					/*
5805 					 * been through it all, must hold sb
5806 					 * lock ok to null tail
5807 					 */
5808 					if (control->data == NULL) {
5809 #ifdef INVARIANTS
5810 						if ((control->end_added == 0) ||
5811 						    (TAILQ_NEXT(control, next) == NULL)) {
5812 							/*
5813 							 * If the end is not
5814 							 * added, OR the
5815 							 * next is NOT null
5816 							 * we MUST have the
5817 							 * lock.
5818 							 */
5819 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5820 								panic("Hmm we don't own the lock?");
5821 							}
5822 						}
5823 #endif
5824 						control->tail_mbuf = NULL;
5825 #ifdef INVARIANTS
5826 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5827 							panic("end_added, nothing left and no MSG_EOR");
5828 						}
5829 #endif
5830 					}
5831 				}
5832 			} else {
5833 				/* Do we need to trim the mbuf? */
5834 				if (control->spec_flags & M_NOTIFICATION) {
5835 					out_flags |= MSG_NOTIFICATION;
5836 				}
5837 				if ((in_flags & MSG_PEEK) == 0) {
5838 					SCTP_BUF_RESV_UF(m, cp_len);
5839 					SCTP_BUF_LEN(m) -= (int)cp_len;
5840 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5841 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len);
5842 					}
5843 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5844 					if ((control->do_not_ref_stcb == 0) &&
5845 					    stcb) {
5846 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5847 					}
5848 					copied_so_far += cp_len;
5849 					freed_so_far += (uint32_t)cp_len;
5850 					freed_so_far += MSIZE;
5851 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5852 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5853 						    SCTP_LOG_SBRESULT, 0);
5854 					}
5855 					atomic_subtract_int(&control->length, cp_len);
5856 				} else {
5857 					copied_so_far += cp_len;
5858 				}
5859 			}
5860 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5861 				break;
5862 			}
5863 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5864 			    (control->do_not_ref_stcb == 0) &&
5865 			    (freed_so_far >= rwnd_req)) {
5866 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5867 			}
5868 		}		/* end while(m) */
5869 		/*
5870 		 * At this point we have looked at it all and we either have
5871 		 * a MSG_EOR/or read all the user wants... <OR>
5872 		 * control->length == 0.
5873 		 */
5874 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5875 			/* we are done with this control */
5876 			if (control->length == 0) {
5877 				if (control->data) {
5878 #ifdef INVARIANTS
5879 					panic("control->data not null at read eor?");
5880 #else
5881 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5882 					sctp_m_freem(control->data);
5883 					control->data = NULL;
5884 #endif
5885 				}
5886 		done_with_control:
5887 				if (hold_rlock == 0) {
5888 					SCTP_INP_READ_LOCK(inp);
5889 					hold_rlock = 1;
5890 				}
5891 				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hidden data */
5893 				if (control->held_length) {
5894 					held_length = 0;
5895 					control->held_length = 0;
5896 					wakeup_read_socket = 1;
5897 				}
5898 				if (control->aux_data) {
5899 					sctp_m_free(control->aux_data);
5900 					control->aux_data = NULL;
5901 				}
5902 				no_rcv_needed = control->do_not_ref_stcb;
5903 				sctp_free_remote_addr(control->whoFrom);
5904 				control->data = NULL;
5905 #ifdef INVARIANTS
5906 				if (control->on_strm_q) {
5907 					panic("About to free ctl:%p so:%p and its in %d",
5908 					    control, so, control->on_strm_q);
5909 				}
5910 #endif
5911 				sctp_free_a_readq(stcb, control);
5912 				control = NULL;
5913 				if ((freed_so_far >= rwnd_req) &&
5914 				    (no_rcv_needed == 0))
5915 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5916 
5917 			} else {
5918 				/*
5919 				 * The user did not read all of this
5920 				 * message, turn off the returned MSG_EOR
5921 				 * since we are leaving more behind on the
5922 				 * control to read.
5923 				 */
5924 #ifdef INVARIANTS
5925 				if (control->end_added &&
5926 				    (control->data == NULL) &&
5927 				    (control->tail_mbuf == NULL)) {
5928 					panic("Gak, control->length is corrupt?");
5929 				}
5930 #endif
5931 				no_rcv_needed = control->do_not_ref_stcb;
5932 				out_flags &= ~MSG_EOR;
5933 			}
5934 		}
5935 		if (out_flags & MSG_EOR) {
5936 			goto release;
5937 		}
5938 		if ((uio->uio_resid == 0) ||
5939 		    ((in_eeor_mode) &&
5940 		    (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) {
5941 			goto release;
5942 		}
5943 		/*
5944 		 * If I hit here the receiver wants more and this message is
5945 		 * NOT done (pd-api). So two questions. Can we block? if not
5946 		 * we are done. Did the user NOT set MSG_WAITALL?
5947 		 */
5948 		if (block_allowed == 0) {
5949 			goto release;
5950 		}
5951 		/*
5952 		 * We need to wait for more data a few things: - We don't
5953 		 * sbunlock() so we don't get someone else reading. - We
5954 		 * must be sure to account for the case where what is added
5955 		 * is NOT to our control when we wakeup.
5956 		 */
5957 
5958 		/*
5959 		 * Do we need to tell the transport a rwnd update might be
5960 		 * needed before we go to sleep?
5961 		 */
5962 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5963 		    ((freed_so_far >= rwnd_req) &&
5964 		    (control->do_not_ref_stcb == 0) &&
5965 		    (no_rcv_needed == 0))) {
5966 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5967 		}
5968 wait_some_more:
5969 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5970 			goto release;
5971 		}
5972 
5973 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5974 			goto release;
5975 
5976 		if (hold_rlock == 1) {
5977 			SCTP_INP_READ_UNLOCK(inp);
5978 			hold_rlock = 0;
5979 		}
5980 		if (hold_sblock == 0) {
5981 			SOCKBUF_LOCK(&so->so_rcv);
5982 			hold_sblock = 1;
5983 		}
5984 		if ((copied_so_far) && (control->length == 0) &&
5985 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5986 			goto release;
5987 		}
5988 		if (so->so_rcv.sb_cc <= control->held_length) {
5989 			error = sbwait(&so->so_rcv);
5990 			if (error) {
5991 				goto release;
5992 			}
5993 			control->held_length = 0;
5994 		}
5995 		if (hold_sblock) {
5996 			SOCKBUF_UNLOCK(&so->so_rcv);
5997 			hold_sblock = 0;
5998 		}
5999 		if (control->length == 0) {
6000 			/* still nothing here */
6001 			if (control->end_added == 1) {
6002 				/* he aborted, or is done i.e.did a shutdown */
6003 				out_flags |= MSG_EOR;
6004 				if (control->pdapi_aborted) {
6005 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6006 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6007 
6008 					out_flags |= MSG_TRUNC;
6009 				} else {
6010 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6011 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6012 				}
6013 				goto done_with_control;
6014 			}
6015 			if (so->so_rcv.sb_cc > held_length) {
6016 				control->held_length = so->so_rcv.sb_cc;
6017 				held_length = 0;
6018 			}
6019 			goto wait_some_more;
6020 		} else if (control->data == NULL) {
6021 			/*
6022 			 * we must re-sync since data is probably being
6023 			 * added
6024 			 */
6025 			SCTP_INP_READ_LOCK(inp);
6026 			if ((control->length > 0) && (control->data == NULL)) {
6027 				/*
6028 				 * big trouble.. we have the lock and its
6029 				 * corrupt?
6030 				 */
6031 #ifdef INVARIANTS
6032 				panic("Impossible data==NULL length !=0");
6033 #endif
6034 				out_flags |= MSG_EOR;
6035 				out_flags |= MSG_TRUNC;
6036 				control->length = 0;
6037 				SCTP_INP_READ_UNLOCK(inp);
6038 				goto done_with_control;
6039 			}
6040 			SCTP_INP_READ_UNLOCK(inp);
6041 			/* We will fall around to get more data */
6042 		}
6043 		goto get_more_data;
6044 	} else {
6045 		/*-
6046 		 * Give caller back the mbuf chain,
6047 		 * store in uio_resid the length
6048 		 */
6049 		wakeup_read_socket = 0;
6050 		if ((control->end_added == 0) ||
6051 		    (TAILQ_NEXT(control, next) == NULL)) {
6052 			/* Need to get rlock */
6053 			if (hold_rlock == 0) {
6054 				SCTP_INP_READ_LOCK(inp);
6055 				hold_rlock = 1;
6056 			}
6057 		}
6058 		if (control->end_added) {
6059 			out_flags |= MSG_EOR;
6060 			if ((control->do_not_ref_stcb == 0) &&
6061 			    (control->stcb != NULL) &&
6062 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6063 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6064 		}
6065 		if (control->spec_flags & M_NOTIFICATION) {
6066 			out_flags |= MSG_NOTIFICATION;
6067 		}
6068 		uio->uio_resid = control->length;
6069 		*mp = control->data;
6070 		m = control->data;
6071 		while (m) {
6072 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6073 				sctp_sblog(&so->so_rcv,
6074 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6075 			}
6076 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6077 			freed_so_far += (uint32_t)SCTP_BUF_LEN(m);
6078 			freed_so_far += MSIZE;
6079 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6080 				sctp_sblog(&so->so_rcv,
6081 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6082 			}
6083 			m = SCTP_BUF_NEXT(m);
6084 		}
6085 		control->data = control->tail_mbuf = NULL;
6086 		control->length = 0;
6087 		if (out_flags & MSG_EOR) {
6088 			/* Done with this control */
6089 			goto done_with_control;
6090 		}
6091 	}
6092 release:
6093 	if (hold_rlock == 1) {
6094 		SCTP_INP_READ_UNLOCK(inp);
6095 		hold_rlock = 0;
6096 	}
6097 	if (hold_sblock == 1) {
6098 		SOCKBUF_UNLOCK(&so->so_rcv);
6099 		hold_sblock = 0;
6100 	}
6101 
6102 	sbunlock(&so->so_rcv);
6103 	sockbuf_lock = 0;
6104 
6105 release_unlocked:
6106 	if (hold_sblock) {
6107 		SOCKBUF_UNLOCK(&so->so_rcv);
6108 		hold_sblock = 0;
6109 	}
6110 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6111 		if ((freed_so_far >= rwnd_req) &&
6112 		    (control && (control->do_not_ref_stcb == 0)) &&
6113 		    (no_rcv_needed == 0))
6114 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6115 	}
6116 out:
6117 	if (msg_flags) {
6118 		*msg_flags = out_flags;
6119 	}
6120 	if (((out_flags & MSG_EOR) == 0) &&
6121 	    ((in_flags & MSG_PEEK) == 0) &&
6122 	    (sinfo) &&
6123 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6124 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6125 		struct sctp_extrcvinfo *s_extra;
6126 
6127 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6128 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6129 	}
6130 	if (hold_rlock == 1) {
6131 		SCTP_INP_READ_UNLOCK(inp);
6132 	}
6133 	if (hold_sblock) {
6134 		SOCKBUF_UNLOCK(&so->so_rcv);
6135 	}
6136 	if (sockbuf_lock) {
6137 		sbunlock(&so->so_rcv);
6138 	}
6139 
6140 	if (freecnt_applied) {
6141 		/*
6142 		 * The lock on the socket buffer protects us so the free
6143 		 * code will stop. But since we used the socketbuf lock and
6144 		 * the sender uses the tcb_lock to increment, we need to use
6145 		 * the atomic add to the refcnt.
6146 		 */
6147 		if (stcb == NULL) {
6148 #ifdef INVARIANTS
6149 			panic("stcb for refcnt has gone NULL?");
6150 			goto stage_left;
6151 #else
6152 			goto stage_left;
6153 #endif
6154 		}
6155 		/* Save the value back for next time */
6156 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6157 		atomic_add_int(&stcb->asoc.refcnt, -1);
6158 	}
6159 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6160 		if (stcb) {
6161 			sctp_misc_ints(SCTP_SORECV_DONE,
6162 			    freed_so_far,
6163 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6164 			    stcb->asoc.my_rwnd,
6165 			    so->so_rcv.sb_cc);
6166 		} else {
6167 			sctp_misc_ints(SCTP_SORECV_DONE,
6168 			    freed_so_far,
6169 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6170 			    0,
6171 			    so->so_rcv.sb_cc);
6172 		}
6173 	}
6174 stage_left:
6175 	if (wakeup_read_socket) {
6176 		sctp_sorwakeup(inp, so);
6177 	}
6178 	return (error);
6179 }
6180 
6181 
6182 #ifdef SCTP_MBUF_LOGGING
6183 struct mbuf *
6184 sctp_m_free(struct mbuf *m)
6185 {
6186 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6187 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6188 	}
6189 	return (m_free(m));
6190 }
6191 
6192 void
6193 sctp_m_freem(struct mbuf *mb)
6194 {
6195 	while (mb != NULL)
6196 		mb = sctp_m_free(mb);
6197 }
6198 
6199 #endif
6200 
6201 int
6202 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6203 {
6204 	/*
6205 	 * Given a local address. For all associations that holds the
6206 	 * address, request a peer-set-primary.
6207 	 */
6208 	struct sctp_ifa *ifa;
6209 	struct sctp_laddr *wi;
6210 
6211 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6212 	if (ifa == NULL) {
6213 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6214 		return (EADDRNOTAVAIL);
6215 	}
6216 	/*
6217 	 * Now that we have the ifa we must awaken the iterator with this
6218 	 * message.
6219 	 */
6220 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6221 	if (wi == NULL) {
6222 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6223 		return (ENOMEM);
6224 	}
6225 	/* Now incr the count and int wi structure */
6226 	SCTP_INCR_LADDR_COUNT();
6227 	memset(wi, 0, sizeof(*wi));
6228 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6229 	wi->ifa = ifa;
6230 	wi->action = SCTP_SET_PRIM_ADDR;
6231 	atomic_add_int(&ifa->refcount, 1);
6232 
6233 	/* Now add it to the work queue */
6234 	SCTP_WQ_ADDR_LOCK();
6235 	/*
6236 	 * Should this really be a tailq? As it is we will process the
6237 	 * newest first :-0
6238 	 */
6239 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6240 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6241 	    (struct sctp_inpcb *)NULL,
6242 	    (struct sctp_tcb *)NULL,
6243 	    (struct sctp_nets *)NULL);
6244 	SCTP_WQ_ADDR_UNLOCK();
6245 	return (0);
6246 }
6247 
6248 
6249 int
6250 sctp_soreceive(struct socket *so,
6251     struct sockaddr **psa,
6252     struct uio *uio,
6253     struct mbuf **mp0,
6254     struct mbuf **controlp,
6255     int *flagsp)
6256 {
6257 	int error, fromlen;
6258 	uint8_t sockbuf[256];
6259 	struct sockaddr *from;
6260 	struct sctp_extrcvinfo sinfo;
6261 	int filling_sinfo = 1;
6262 	int flags;
6263 	struct sctp_inpcb *inp;
6264 
6265 	inp = (struct sctp_inpcb *)so->so_pcb;
6266 	/* pickup the assoc we are reading from */
6267 	if (inp == NULL) {
6268 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6269 		return (EINVAL);
6270 	}
6271 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6272 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6273 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6274 	    (controlp == NULL)) {
6275 		/* user does not want the sndrcv ctl */
6276 		filling_sinfo = 0;
6277 	}
6278 	if (psa) {
6279 		from = (struct sockaddr *)sockbuf;
6280 		fromlen = sizeof(sockbuf);
6281 		from->sa_len = 0;
6282 	} else {
6283 		from = NULL;
6284 		fromlen = 0;
6285 	}
6286 
6287 	if (filling_sinfo) {
6288 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6289 	}
6290 	if (flagsp != NULL) {
6291 		flags = *flagsp;
6292 	} else {
6293 		flags = 0;
6294 	}
6295 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
6296 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6297 	if (flagsp != NULL) {
6298 		*flagsp = flags;
6299 	}
6300 	if (controlp != NULL) {
6301 		/* copy back the sinfo in a CMSG format */
6302 		if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
6303 			*controlp = sctp_build_ctl_nchunk(inp,
6304 			    (struct sctp_sndrcvinfo *)&sinfo);
6305 		} else {
6306 			*controlp = NULL;
6307 		}
6308 	}
6309 	if (psa) {
6310 		/* copy back the address info */
6311 		if (from && from->sa_len) {
6312 			*psa = sodupsockaddr(from, M_NOWAIT);
6313 		} else {
6314 			*psa = NULL;
6315 		}
6316 	}
6317 	return (error);
6318 }
6319 
6320 
6321 
6322 
6323 
/*
 * Add each address in the packed array "addr" (totaddr entries) to the
 * association "stcb" as a confirmed remote address.  On an invalid
 * address or an allocation failure the association is freed, *error is
 * set (EINVAL or ENOBUFS) and the walk stops.  Returns the number of
 * addresses successfully added.  Assumes the caller already validated
 * the array layout (e.g. via sctp_connectx_helper_find()).
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject wildcard, broadcast and multicast addresses. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast addresses. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): an unknown family leaves "incr" at
			 * its previous value (0 on the first iteration), so
			 * "sa" may not advance and the same bytes are
			 * re-examined.  Presumably harmless because callers
			 * pre-validate the families — confirm.
			 */
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6404 
/*
 * Walk the packed address array "addr" (totaddr entries, at most "limit"
 * bytes) and validate it for connectx use: every entry must be a
 * well-formed AF_INET/AF_INET6 address that fits inside the buffer and
 * must not already belong to an existing association on "inp".  The
 * v4/v6 addresses are counted into *num_v4/*num_v6.  Returns 0 on
 * success, EINVAL on a malformed array, or EALREADY if an association
 * already exists for one of the addresses.
 */
int
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    unsigned int totaddr,
    unsigned int *num_v4, unsigned int *num_v6,
    unsigned int limit)
{
	struct sockaddr *sa;
	struct sctp_tcb *stcb;
	unsigned int incr, at, i;

	at = 0;
	sa = addr;
	*num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	if (totaddr == 0) {
		return (EINVAL);
	}
	for (i = 0; i < totaddr; i++) {
		/* At least the generic sockaddr header must fit. */
		if (at + sizeof(struct sockaddr) > limit) {
			return (EINVAL);
		}
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = (unsigned int)sizeof(struct sockaddr_in);
			if (sa->sa_len != incr) {
				return (EINVAL);
			}
			(*num_v4) += 1;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			{
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)sa;
				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
					/* Must be non-mapped for connectx */
					return (EINVAL);
				}
				incr = (unsigned int)sizeof(struct sockaddr_in6);
				if (sa->sa_len != incr) {
					return (EINVAL);
				}
				(*num_v6) += 1;
				break;
			}
#endif
		default:
			return (EINVAL);
		}
		/* The full family-specific entry must fit as well. */
		if ((at + incr) > limit) {
			return (EINVAL);
		}
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/*
			 * An association already exists for this address.
			 * NOTE(review): no SCTP_INP_DECR_REF on this path —
			 * presumably sctp_findassociation_ep_addr() releases
			 * the inp reference on a successful lookup; confirm
			 * against its implementation.
			 */
			SCTP_TCB_UNLOCK(stcb);
			return (EALREADY);
		} else {
			/* Lookup failed: drop the reference we took. */
			SCTP_INP_DECR_REF(inp);
		}
		at += incr;
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return (0);
}
6473 
6474 /*
6475  * sctp_bindx(ADD) for one address.
6476  * assumes all arguments are valid/checked by caller.
6477  */
/*
 * Worker for sctp_bindx(SCTP_BINDX_ADD_ADDR) handling a single address.
 * Validates the address against the endpoint's family/binding state and
 * either performs the initial bind (unbound endpoint) or adds the
 * address to the endpoint's address list.  On failure *error is set to
 * an errno value; on success it is left untouched by most paths (the
 * caller is expected to have pre-cleared it).
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	/* Scratch space for converting a v4-mapped v6 address to v4. */
	struct sockaddr_in sin;
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Work with the embedded v4 address from here on. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* Endpoint not bound yet: this ADD acts as the initial bind. */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		/*
		 * NOTE(review): addr_touse may still be a sockaddr_in6 at
		 * this point; the cast presumably relies on sin_port and
		 * sin6_port sharing the same offset — confirm.
		 */
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Address is free: add it to this endpoint. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* Another endpoint owns this address/port pair. */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6603 
6604 /*
6605  * sctp_bindx(DELETE) for one address.
6606  * assumes all arguments are valid/checked by caller.
6607  */
/*
 * Worker for sctp_bindx(SCTP_BINDX_REM_ADDR) handling a single address.
 * Validates the address against the endpoint's family/binding state and
 * removes it from the endpoint's address list.  On failure *error is
 * set to an errno value.  Assumes all arguments were validated by the
 * caller.
 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	/* Scratch space for converting a v4-mapped v6 address to v4. */
	struct sockaddr_in sin;
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;
#endif

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Work with the embedded v4 address from here on. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6690 
6691 /*
6692  * returns the valid local address count for an assoc, taking into account
6693  * all scoping rules
6694  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Count the local addresses usable by this association, applying
	 * its scoping rules (loopback, private v4, link/site-local v6)
	 * and jail visibility checks.  Returns 0 if the VRF is missing.
	 */
	int loopback_scope;
#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;
#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;
#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}

	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				/* Skip loopback interfaces when out of scope. */
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if (sin->sin_addr.s_addr == 0) {
							/* skip unspecified addrs */
							continue;
						}
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							/* Not visible in this jail. */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							/* Private address out of scope. */
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							/* skip unspecified addrs */
							continue;
						}
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							/* Not visible in this jail. */
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							/* Recover the embedded scope id if missing. */
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case: only the explicitly bound,
		 * unrestricted addresses count.
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6833 
6834 #if defined(SCTP_LOCAL_TRACE_BUF)
6835 
6836 void
6837 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6838 {
6839 	uint32_t saveindex, newindex;
6840 
6841 	do {
6842 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6843 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6844 			newindex = 1;
6845 		} else {
6846 			newindex = saveindex + 1;
6847 		}
6848 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6849 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6850 		saveindex = 0;
6851 	}
6852 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6853 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6854 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6855 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6856 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6857 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6858 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6859 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6860 }
6861 
6862 #endif
/*
 * Input handler for SCTP-over-UDP tunneled packets, registered with
 * udp_set_kernel_tunneling().  Strips the UDP encapsulation header
 * from the mbuf chain and re-injects the packet into the normal
 * address-family specific SCTP input path, passing along the UDP
 * source port as the remote tunneling port.  Consumes "m" on every
 * path (directly or via the SCTP input routines).
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	struct ip *iph;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;		/* UDP source port, network byte order */

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed (m_pullup frees the chain on failure) */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain: append sp to the tail of m. */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	/* Account for the appended payload in the leading pkthdr. */
	m->m_pkthdr.len += sp->m_pkthdr.len;
	/*
	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
	 * checksum and it was valid. Since CSUM_DATA_VALID ==
	 * CSUM_SCTP_VALID this would imply that the HW also verified the
	 * SCTP checksum. Therefore, clear the bit.
	 */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
	iph = mtod(m, struct ip *);
	/*
	 * Shrink the recorded IP payload length by the removed UDP header
	 * and hand the packet to the matching SCTP input routine.
	 */
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		/* Unknown or unsupported IP version: drop. */
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6945 
6946 #ifdef INET
/*
 * ICMP error handler for the IPv4 SCTP-over-UDP tunnel socket.
 * Before acting on the error, verify that the quoted packet really
 * belongs to one of our associations (UDP port and verification-tag
 * checks), so spoofed ICMP messages are ignored.  On a match,
 * sctp_notify() is invoked to process the error.
 */
static void
sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
{
	struct ip *outer_ip, *inner_ip;
	struct sctphdr *sh;
	struct icmp *icmp;
	struct udphdr *udp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_init_chunk *ch;
	struct sockaddr_in src, dst;
	uint8_t type, code;

	/*
	 * "vip" points at the inner (quoted) IP header; walk backwards
	 * from it to recover the ICMP header and the outer IP header.
	 */
	inner_ip = (struct ip *)vip;
	icmp = (struct icmp *)((caddr_t)inner_ip -
	    (sizeof(struct icmp) - sizeof(struct ip)));
	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
	/*
	 * Require the ICMP payload to carry the inner IP header, the full
	 * UDP header, and the first 8 bytes of the SCTP common header
	 * (ports and verification tag).
	 */
	if (ntohs(outer_ip->ip_len) <
	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
		return;
	}
	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
	sh = (struct sctphdr *)(udp + 1);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
	src.sin_len = sizeof(struct sockaddr_in);
	src.sin_port = sh->src_port;
	src.sin_addr = inner_ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(struct sockaddr_in);
	dst.sin_port = sh->dest_port;
	dst.sin_addr = inner_ip->ip_dst;
	/*
	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
	 * holds our local endpoint address. Thus we reverse the dst and the
	 * src in the lookup.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1,
	    SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/*
		 * stcb is returned locked; every exit path below must
		 * release the TCB lock.
		 */
		/* Check the UDP port numbers */
		if ((udp->uh_dport != net->port) ||
		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh->v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			/*
			 * v_tag == 0: only an INIT may be quoted.  Make
			 * sure enough of the chunk is present to read the
			 * initiate tag (SCTP common header + chunk header
			 * + initiate tag = 20 bytes).
			 */
			if (ntohs(outer_ip->ip_len) >=
			    sizeof(struct ip) +
			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				ch = (struct sctp_init_chunk *)(sh + 1);
				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = icmp->icmp_type;
		code = icmp->icmp_code;
		/*
		 * Port unreachable refers to the UDP encapsulation, not
		 * SCTP itself; report it as protocol unreachable.
		 */
		if ((type == ICMP_UNREACH) &&
		    (code == ICMP_UNREACH_PORT)) {
			code = ICMP_UNREACH_PROTOCOL;
		}
		sctp_notify(inp, stcb, net, type, code,
		    ntohs(inner_ip->ip_len),
		    (uint32_t)ntohs(icmp->icmp_nextmtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
	return;
}
7054 #endif
7055 
7056 #ifdef INET6
/*
 * ICMPv6 error handler for the IPv6 SCTP-over-UDP tunnel socket.
 * Mirrors sctp_recv_icmp_tunneled_packet(): validate that the quoted
 * packet belongs to one of our associations (UDP port and
 * verification-tag checks) before calling sctp6_notify(), so spoofed
 * ICMPv6 messages are ignored.  Unlike the IPv4 path, header fields
 * are copied out of the mbuf chain with m_copydata() rather than
 * accessed through pointers.
 */
static void
sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
{
	struct ip6ctlparam *ip6cp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctphdr sh;
	struct udphdr udp;
	struct sockaddr_in6 src, dst;
	uint8_t type, code;

	ip6cp = (struct ip6ctlparam *)d;
	/*
	 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
	 */
	if (ip6cp->ip6c_m == NULL) {
		return;
	}
	/*
	 * Check if we can safely examine the ports and the verification tag
	 * of the SCTP common header.
	 */
	if (ip6cp->ip6c_m->m_pkthdr.len <
	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
		return;
	}
	/* Copy out the UDP header. */
	memset(&udp, 0, sizeof(struct udphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off,
	    sizeof(struct udphdr),
	    (caddr_t)&udp);
	/* Copy out the port numbers and the verification tag. */
	memset(&sh, 0, sizeof(struct sctphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off + sizeof(struct udphdr),
	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
	    (caddr_t)&sh);
	/*
	 * Build the src/dst sockaddrs from the quoted IPv6 header and
	 * resolve their scopes against the receiving interface.
	 */
	memset(&src, 0, sizeof(struct sockaddr_in6));
	src.sin6_family = AF_INET6;
	src.sin6_len = sizeof(struct sockaddr_in6);
	src.sin6_port = sh.src_port;
	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	memset(&dst, 0, sizeof(struct sockaddr_in6));
	dst.sin6_family = AF_INET6;
	dst.sin6_len = sizeof(struct sockaddr_in6);
	dst.sin6_port = sh.dest_port;
	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	/*
	 * 'dst' is the destination of the failed packet and 'src' our
	 * local address, so they are reversed in the lookup.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/*
		 * stcb is returned locked; every exit path below must
		 * release the TCB lock.
		 */
		/* Check the UDP port numbers */
		if ((udp.uh_dport != net->port) ||
		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh.v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			/*
			 * v_tag == 0: only an INIT may be quoted.  Require
			 * enough data to read the chunk type and the
			 * initiate tag.
			 */
			if (ip6cp->ip6c_m->m_pkthdr.len >=
			    ip6cp->ip6c_off + sizeof(struct udphdr) +
			    sizeof(struct sctphdr) +
			    sizeof(struct sctp_chunkhdr) +
			    offsetof(struct sctp_init, a_rwnd)) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				uint32_t initiate_tag;
				uint8_t chunk_type;

				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr),
				    sizeof(uint8_t),
				    (caddr_t)&chunk_type);
				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr) +
				    sizeof(struct sctp_chunkhdr),
				    sizeof(uint32_t),
				    (caddr_t)&initiate_tag);
				if ((chunk_type != SCTP_INITIATION) ||
				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = ip6cp->ip6c_icmp6->icmp6_type;
		code = ip6cp->ip6c_icmp6->icmp6_code;
		/*
		 * Port unreachable refers to the UDP encapsulation, not
		 * SCTP itself; report it as an unrecognized next header.
		 */
		if ((type == ICMP6_DST_UNREACH) &&
		    (code == ICMP6_DST_UNREACH_NOPORT)) {
			type = ICMP6_PARAM_PROB;
			code = ICMP6_PARAMPROB_NEXTHEADER;
		}
		sctp6_notify(inp, stcb, net, type, code,
		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce inp's ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
}
7195 #endif
7196 
/*
 * Tear down the kernel UDP tunneling sockets used for SCTP over UDP,
 * for whichever address families were compiled in.  Safe to call when
 * the sockets were never created (the pointers are then NULL).
 */
void
sctp_over_udp_stop(void)
{
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
7217 
/*
 * Create and bind the kernel UDP tunneling sockets for SCTP over UDP
 * on the sysctl-configured port, and register the packet/ICMP hooks.
 * Returns 0 on success or an errno value; on any failure all sockets
 * created so far are torn down via sctp_over_udp_stop().
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}
7304 
7305 /*
7306  * sctp_min_mtu ()returns the minimum of all non-zero arguments.
7307  * If all arguments are zero, zero is returned.
7308  */
7309 uint32_t
7310 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7311 {
7312 	if (mtu1 > 0) {
7313 		if (mtu2 > 0) {
7314 			if (mtu3 > 0) {
7315 				return (min(mtu1, min(mtu2, mtu3)));
7316 			} else {
7317 				return (min(mtu1, mtu2));
7318 			}
7319 		} else {
7320 			if (mtu3 > 0) {
7321 				return (min(mtu1, mtu3));
7322 			} else {
7323 				return (mtu1);
7324 			}
7325 		}
7326 	} else {
7327 		if (mtu2 > 0) {
7328 			if (mtu3 > 0) {
7329 				return (min(mtu2, mtu3));
7330 			} else {
7331 				return (mtu2);
7332 			}
7333 		} else {
7334 			return (mtu3);
7335 		}
7336 	}
7337 }
7338 
7339 void
7340 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7341 {
7342 	struct in_conninfo inc;
7343 
7344 	memset(&inc, 0, sizeof(struct in_conninfo));
7345 	inc.inc_fibnum = fibnum;
7346 	switch (addr->sa.sa_family) {
7347 #ifdef INET
7348 	case AF_INET:
7349 		inc.inc_faddr = addr->sin.sin_addr;
7350 		break;
7351 #endif
7352 #ifdef INET6
7353 	case AF_INET6:
7354 		inc.inc_flags |= INC_ISIPV6;
7355 		inc.inc6_faddr = addr->sin6.sin6_addr;
7356 		break;
7357 #endif
7358 	default:
7359 		return;
7360 	}
7361 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7362 }
7363 
7364 uint32_t
7365 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7366 {
7367 	struct in_conninfo inc;
7368 
7369 	memset(&inc, 0, sizeof(struct in_conninfo));
7370 	inc.inc_fibnum = fibnum;
7371 	switch (addr->sa.sa_family) {
7372 #ifdef INET
7373 	case AF_INET:
7374 		inc.inc_faddr = addr->sin.sin_addr;
7375 		break;
7376 #endif
7377 #ifdef INET6
7378 	case AF_INET6:
7379 		inc.inc_flags |= INC_ISIPV6;
7380 		inc.inc6_faddr = addr->sin6.sin6_addr;
7381 		break;
7382 #endif
7383 	default:
7384 		return (0);
7385 	}
7386 	return ((uint32_t)tcp_hc_getmtu(&inc));
7387 }
7388 
/*
 * Set the association's primary state, preserving any substate bits.
 * new_state must contain only primary-state bits (asserted below).
 * Entering a shutdown state clears the SHUTDOWN_PENDING substate.
 */
void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	/* Replace the primary-state bits, keep the substate bits. */
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Shutdown is now in progress; the pending flag is stale. */
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	/*
	 * Fire the DTrace state-change probe on a real transition,
	 * except for the initial EMPTY -> INUSE transition.
	 */
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	    (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}
7413 
/*
 * Add substate flag bits to the association's state.  substate must
 * contain only substate bits (asserted below); the primary state is
 * left untouched.
 */
void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	/*
	 * Fire the DTrace state-change probe only when ABOUT_TO_BE_FREED
	 * or SHUTDOWN_PENDING is newly set (not already present).
	 */
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}
7434