xref: /freebsd/sys/netinet/sctputil.c (revision 0183e0151669735d62584fbba9125ed90716af5e)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>	/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #if defined(INET6) || defined(INET)
53 #include <netinet/tcp_var.h>
54 #endif
55 #include <netinet/udp.h>
56 #include <netinet/udp_var.h>
57 #include <sys/proc.h>
58 #ifdef INET6
59 #include <netinet/icmp6.h>
60 #endif
61 
62 
63 #ifndef KTR_SCTP
64 #define KTR_SCTP KTR_SUBSYS
65 #endif
66 
67 extern const struct sctp_cc_functions sctp_cc_functions[];
68 extern const struct sctp_ss_functions sctp_ss_functions[];
69 
70 void
71 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
72 {
73 	struct sctp_cwnd_log sctp_clog;
74 
75 	sctp_clog.x.sb.stcb = stcb;
76 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
77 	if (stcb)
78 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
79 	else
80 		sctp_clog.x.sb.stcb_sbcc = 0;
81 	sctp_clog.x.sb.incr = incr;
82 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
83 	    SCTP_LOG_EVENT_SB,
84 	    from,
85 	    sctp_clog.x.misc.log1,
86 	    sctp_clog.x.misc.log2,
87 	    sctp_clog.x.misc.log3,
88 	    sctp_clog.x.misc.log4);
89 }
90 
91 void
92 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
93 {
94 	struct sctp_cwnd_log sctp_clog;
95 
96 	sctp_clog.x.close.inp = (void *)inp;
97 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
98 	if (stcb) {
99 		sctp_clog.x.close.stcb = (void *)stcb;
100 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
101 	} else {
102 		sctp_clog.x.close.stcb = 0;
103 		sctp_clog.x.close.state = 0;
104 	}
105 	sctp_clog.x.close.loc = loc;
106 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
107 	    SCTP_LOG_EVENT_CLOSE,
108 	    0,
109 	    sctp_clog.x.misc.log1,
110 	    sctp_clog.x.misc.log2,
111 	    sctp_clog.x.misc.log3,
112 	    sctp_clog.x.misc.log4);
113 }
114 
115 void
116 rto_logging(struct sctp_nets *net, int from)
117 {
118 	struct sctp_cwnd_log sctp_clog;
119 
120 	memset(&sctp_clog, 0, sizeof(sctp_clog));
121 	sctp_clog.x.rto.net = (void *)net;
122 	sctp_clog.x.rto.rtt = net->rtt / 1000;
123 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
124 	    SCTP_LOG_EVENT_RTT,
125 	    from,
126 	    sctp_clog.x.misc.log1,
127 	    sctp_clog.x.misc.log2,
128 	    sctp_clog.x.misc.log3,
129 	    sctp_clog.x.misc.log4);
130 }
131 
132 void
133 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
134 {
135 	struct sctp_cwnd_log sctp_clog;
136 
137 	sctp_clog.x.strlog.stcb = stcb;
138 	sctp_clog.x.strlog.n_tsn = tsn;
139 	sctp_clog.x.strlog.n_sseq = sseq;
140 	sctp_clog.x.strlog.e_tsn = 0;
141 	sctp_clog.x.strlog.e_sseq = 0;
142 	sctp_clog.x.strlog.strm = stream;
143 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
144 	    SCTP_LOG_EVENT_STRM,
145 	    from,
146 	    sctp_clog.x.misc.log1,
147 	    sctp_clog.x.misc.log2,
148 	    sctp_clog.x.misc.log3,
149 	    sctp_clog.x.misc.log4);
150 }
151 
152 void
153 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
154 {
155 	struct sctp_cwnd_log sctp_clog;
156 
157 	sctp_clog.x.nagle.stcb = (void *)stcb;
158 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
159 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
160 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
161 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
162 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
163 	    SCTP_LOG_EVENT_NAGLE,
164 	    action,
165 	    sctp_clog.x.misc.log1,
166 	    sctp_clog.x.misc.log2,
167 	    sctp_clog.x.misc.log3,
168 	    sctp_clog.x.misc.log4);
169 }
170 
171 void
172 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
173 {
174 	struct sctp_cwnd_log sctp_clog;
175 
176 	sctp_clog.x.sack.cumack = cumack;
177 	sctp_clog.x.sack.oldcumack = old_cumack;
178 	sctp_clog.x.sack.tsn = tsn;
179 	sctp_clog.x.sack.numGaps = gaps;
180 	sctp_clog.x.sack.numDups = dups;
181 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
182 	    SCTP_LOG_EVENT_SACK,
183 	    from,
184 	    sctp_clog.x.misc.log1,
185 	    sctp_clog.x.misc.log2,
186 	    sctp_clog.x.misc.log3,
187 	    sctp_clog.x.misc.log4);
188 }
189 
190 void
191 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
192 {
193 	struct sctp_cwnd_log sctp_clog;
194 
195 	memset(&sctp_clog, 0, sizeof(sctp_clog));
196 	sctp_clog.x.map.base = map;
197 	sctp_clog.x.map.cum = cum;
198 	sctp_clog.x.map.high = high;
199 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
200 	    SCTP_LOG_EVENT_MAP,
201 	    from,
202 	    sctp_clog.x.misc.log1,
203 	    sctp_clog.x.misc.log2,
204 	    sctp_clog.x.misc.log3,
205 	    sctp_clog.x.misc.log4);
206 }
207 
208 void
209 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
210 {
211 	struct sctp_cwnd_log sctp_clog;
212 
213 	memset(&sctp_clog, 0, sizeof(sctp_clog));
214 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
215 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
216 	sctp_clog.x.fr.tsn = tsn;
217 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
218 	    SCTP_LOG_EVENT_FR,
219 	    from,
220 	    sctp_clog.x.misc.log1,
221 	    sctp_clog.x.misc.log2,
222 	    sctp_clog.x.misc.log3,
223 	    sctp_clog.x.misc.log4);
224 }
225 
226 #ifdef SCTP_MBUF_LOGGING
227 void
228 sctp_log_mb(struct mbuf *m, int from)
229 {
230 	struct sctp_cwnd_log sctp_clog;
231 
232 	sctp_clog.x.mb.mp = m;
233 	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
234 	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
235 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
236 	if (SCTP_BUF_IS_EXTENDED(m)) {
237 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
238 		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
239 	} else {
240 		sctp_clog.x.mb.ext = 0;
241 		sctp_clog.x.mb.refcnt = 0;
242 	}
243 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
244 	    SCTP_LOG_EVENT_MBUF,
245 	    from,
246 	    sctp_clog.x.misc.log1,
247 	    sctp_clog.x.misc.log2,
248 	    sctp_clog.x.misc.log3,
249 	    sctp_clog.x.misc.log4);
250 }
251 
252 void
253 sctp_log_mbc(struct mbuf *m, int from)
254 {
255 	struct mbuf *mat;
256 
257 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
258 		sctp_log_mb(mat, from);
259 	}
260 }
261 #endif
262 
263 void
264 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
265 {
266 	struct sctp_cwnd_log sctp_clog;
267 
268 	if (control == NULL) {
269 		SCTP_PRINTF("Gak log of NULL?\n");
270 		return;
271 	}
272 	sctp_clog.x.strlog.stcb = control->stcb;
273 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
274 	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
275 	sctp_clog.x.strlog.strm = control->sinfo_stream;
276 	if (poschk != NULL) {
277 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
278 		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
279 	} else {
280 		sctp_clog.x.strlog.e_tsn = 0;
281 		sctp_clog.x.strlog.e_sseq = 0;
282 	}
283 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
284 	    SCTP_LOG_EVENT_STRM,
285 	    from,
286 	    sctp_clog.x.misc.log1,
287 	    sctp_clog.x.misc.log2,
288 	    sctp_clog.x.misc.log3,
289 	    sctp_clog.x.misc.log4);
290 }
291 
292 void
293 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
294 {
295 	struct sctp_cwnd_log sctp_clog;
296 
297 	sctp_clog.x.cwnd.net = net;
298 	if (stcb->asoc.send_queue_cnt > 255)
299 		sctp_clog.x.cwnd.cnt_in_send = 255;
300 	else
301 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
302 	if (stcb->asoc.stream_queue_cnt > 255)
303 		sctp_clog.x.cwnd.cnt_in_str = 255;
304 	else
305 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
306 
307 	if (net) {
308 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
309 		sctp_clog.x.cwnd.inflight = net->flight_size;
310 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
311 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
312 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
313 	}
314 	if (SCTP_CWNDLOG_PRESEND == from) {
315 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
316 	}
317 	sctp_clog.x.cwnd.cwnd_augment = augment;
318 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
319 	    SCTP_LOG_EVENT_CWND,
320 	    from,
321 	    sctp_clog.x.misc.log1,
322 	    sctp_clog.x.misc.log2,
323 	    sctp_clog.x.misc.log3,
324 	    sctp_clog.x.misc.log4);
325 }
326 
327 void
328 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
329 {
330 	struct sctp_cwnd_log sctp_clog;
331 
332 	memset(&sctp_clog, 0, sizeof(sctp_clog));
333 	if (inp) {
334 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
335 
336 	} else {
337 		sctp_clog.x.lock.sock = (void *)NULL;
338 	}
339 	sctp_clog.x.lock.inp = (void *)inp;
340 	if (stcb) {
341 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
342 	} else {
343 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
344 	}
345 	if (inp) {
346 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
347 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
348 	} else {
349 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
350 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
351 	}
352 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
353 	if (inp && (inp->sctp_socket)) {
354 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
355 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
356 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
357 	} else {
358 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
359 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
360 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
361 	}
362 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
363 	    SCTP_LOG_LOCK_EVENT,
364 	    from,
365 	    sctp_clog.x.misc.log1,
366 	    sctp_clog.x.misc.log2,
367 	    sctp_clog.x.misc.log3,
368 	    sctp_clog.x.misc.log4);
369 }
370 
371 void
372 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
373 {
374 	struct sctp_cwnd_log sctp_clog;
375 
376 	memset(&sctp_clog, 0, sizeof(sctp_clog));
377 	sctp_clog.x.cwnd.net = net;
378 	sctp_clog.x.cwnd.cwnd_new_value = error;
379 	sctp_clog.x.cwnd.inflight = net->flight_size;
380 	sctp_clog.x.cwnd.cwnd_augment = burst;
381 	if (stcb->asoc.send_queue_cnt > 255)
382 		sctp_clog.x.cwnd.cnt_in_send = 255;
383 	else
384 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
385 	if (stcb->asoc.stream_queue_cnt > 255)
386 		sctp_clog.x.cwnd.cnt_in_str = 255;
387 	else
388 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
389 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
390 	    SCTP_LOG_EVENT_MAXBURST,
391 	    from,
392 	    sctp_clog.x.misc.log1,
393 	    sctp_clog.x.misc.log2,
394 	    sctp_clog.x.misc.log3,
395 	    sctp_clog.x.misc.log4);
396 }
397 
398 void
399 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
400 {
401 	struct sctp_cwnd_log sctp_clog;
402 
403 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
404 	sctp_clog.x.rwnd.send_size = snd_size;
405 	sctp_clog.x.rwnd.overhead = overhead;
406 	sctp_clog.x.rwnd.new_rwnd = 0;
407 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
408 	    SCTP_LOG_EVENT_RWND,
409 	    from,
410 	    sctp_clog.x.misc.log1,
411 	    sctp_clog.x.misc.log2,
412 	    sctp_clog.x.misc.log3,
413 	    sctp_clog.x.misc.log4);
414 }
415 
416 void
417 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
418 {
419 	struct sctp_cwnd_log sctp_clog;
420 
421 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
422 	sctp_clog.x.rwnd.send_size = flight_size;
423 	sctp_clog.x.rwnd.overhead = overhead;
424 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
425 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
426 	    SCTP_LOG_EVENT_RWND,
427 	    from,
428 	    sctp_clog.x.misc.log1,
429 	    sctp_clog.x.misc.log2,
430 	    sctp_clog.x.misc.log3,
431 	    sctp_clog.x.misc.log4);
432 }
433 
434 #ifdef SCTP_MBCNT_LOGGING
435 static void
436 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
437 {
438 	struct sctp_cwnd_log sctp_clog;
439 
440 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
441 	sctp_clog.x.mbcnt.size_change = book;
442 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
443 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
444 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
445 	    SCTP_LOG_EVENT_MBCNT,
446 	    from,
447 	    sctp_clog.x.misc.log1,
448 	    sctp_clog.x.misc.log2,
449 	    sctp_clog.x.misc.log3,
450 	    sctp_clog.x.misc.log4);
451 }
452 #endif
453 
454 void
455 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
456 {
457 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
458 	    SCTP_LOG_MISC_EVENT,
459 	    from,
460 	    a, b, c, d);
461 }
462 
463 void
464 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
465 {
466 	struct sctp_cwnd_log sctp_clog;
467 
468 	sctp_clog.x.wake.stcb = (void *)stcb;
469 	sctp_clog.x.wake.wake_cnt = wake_cnt;
470 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
471 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
472 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
473 
474 	if (stcb->asoc.stream_queue_cnt < 0xff)
475 		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
476 	else
477 		sctp_clog.x.wake.stream_qcnt = 0xff;
478 
479 	if (stcb->asoc.chunks_on_out_queue < 0xff)
480 		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
481 	else
482 		sctp_clog.x.wake.chunks_on_oque = 0xff;
483 
484 	sctp_clog.x.wake.sctpflags = 0;
485 	/* set in the deferred mode stuff */
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
487 		sctp_clog.x.wake.sctpflags |= 1;
488 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
489 		sctp_clog.x.wake.sctpflags |= 2;
490 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
491 		sctp_clog.x.wake.sctpflags |= 4;
492 	/* what about the sb */
493 	if (stcb->sctp_socket) {
494 		struct socket *so = stcb->sctp_socket;
495 
496 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
497 	} else {
498 		sctp_clog.x.wake.sbflags = 0xff;
499 	}
500 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
501 	    SCTP_LOG_EVENT_WAKE,
502 	    from,
503 	    sctp_clog.x.misc.log1,
504 	    sctp_clog.x.misc.log2,
505 	    sctp_clog.x.misc.log3,
506 	    sctp_clog.x.misc.log4);
507 }
508 
509 void
510 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
511 {
512 	struct sctp_cwnd_log sctp_clog;
513 
514 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
515 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
516 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
517 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
518 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
519 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
520 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
521 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
522 	    SCTP_LOG_EVENT_BLOCK,
523 	    from,
524 	    sctp_clog.x.misc.log1,
525 	    sctp_clog.x.misc.log2,
526 	    sctp_clog.x.misc.log3,
527 	    sctp_clog.x.misc.log4);
528 }
529 
530 int
531 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
532 {
533 	/* May need to fix this if ktrdump does not work */
534 	return (0);
535 }
536 
537 #ifdef SCTP_AUDITING_ENABLED
538 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
539 static int sctp_audit_indx = 0;
540 
541 static
542 void
543 sctp_print_audit_report(void)
544 {
545 	int i;
546 	int cnt;
547 
548 	cnt = 0;
549 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
550 		if ((sctp_audit_data[i][0] == 0xe0) &&
551 		    (sctp_audit_data[i][1] == 0x01)) {
552 			cnt = 0;
553 			SCTP_PRINTF("\n");
554 		} else if (sctp_audit_data[i][0] == 0xf0) {
555 			cnt = 0;
556 			SCTP_PRINTF("\n");
557 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
558 		    (sctp_audit_data[i][1] == 0x01)) {
559 			SCTP_PRINTF("\n");
560 			cnt = 0;
561 		}
562 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
563 		    (uint32_t)sctp_audit_data[i][1]);
564 		cnt++;
565 		if ((cnt % 14) == 0)
566 			SCTP_PRINTF("\n");
567 	}
568 	for (i = 0; i < sctp_audit_indx; i++) {
569 		if ((sctp_audit_data[i][0] == 0xe0) &&
570 		    (sctp_audit_data[i][1] == 0x01)) {
571 			cnt = 0;
572 			SCTP_PRINTF("\n");
573 		} else if (sctp_audit_data[i][0] == 0xf0) {
574 			cnt = 0;
575 			SCTP_PRINTF("\n");
576 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
577 		    (sctp_audit_data[i][1] == 0x01)) {
578 			SCTP_PRINTF("\n");
579 			cnt = 0;
580 		}
581 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
582 		    (uint32_t)sctp_audit_data[i][1]);
583 		cnt++;
584 		if ((cnt % 14) == 0)
585 			SCTP_PRINTF("\n");
586 	}
587 	SCTP_PRINTF("\n");
588 }
589 
590 void
591 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
592     struct sctp_nets *net)
593 {
594 	int resend_cnt, tot_out, rep, tot_book_cnt;
595 	struct sctp_nets *lnet;
596 	struct sctp_tmit_chunk *chk;
597 
598 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
599 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
600 	sctp_audit_indx++;
601 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
602 		sctp_audit_indx = 0;
603 	}
604 	if (inp == NULL) {
605 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
606 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
607 		sctp_audit_indx++;
608 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
609 			sctp_audit_indx = 0;
610 		}
611 		return;
612 	}
613 	if (stcb == NULL) {
614 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
615 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
616 		sctp_audit_indx++;
617 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
618 			sctp_audit_indx = 0;
619 		}
620 		return;
621 	}
622 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
623 	sctp_audit_data[sctp_audit_indx][1] =
624 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
625 	sctp_audit_indx++;
626 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
627 		sctp_audit_indx = 0;
628 	}
629 	rep = 0;
630 	tot_book_cnt = 0;
631 	resend_cnt = tot_out = 0;
632 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
633 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
634 			resend_cnt++;
635 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
636 			tot_out += chk->book_size;
637 			tot_book_cnt++;
638 		}
639 	}
640 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
641 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
642 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
643 		sctp_audit_indx++;
644 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
645 			sctp_audit_indx = 0;
646 		}
647 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
648 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
649 		rep = 1;
650 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
651 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
652 		sctp_audit_data[sctp_audit_indx][1] =
653 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
654 		sctp_audit_indx++;
655 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
656 			sctp_audit_indx = 0;
657 		}
658 	}
659 	if (tot_out != stcb->asoc.total_flight) {
660 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
661 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
662 		sctp_audit_indx++;
663 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
664 			sctp_audit_indx = 0;
665 		}
666 		rep = 1;
667 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
668 		    (int)stcb->asoc.total_flight);
669 		stcb->asoc.total_flight = tot_out;
670 	}
671 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
672 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
673 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
674 		sctp_audit_indx++;
675 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
676 			sctp_audit_indx = 0;
677 		}
678 		rep = 1;
679 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
680 
681 		stcb->asoc.total_flight_count = tot_book_cnt;
682 	}
683 	tot_out = 0;
684 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
685 		tot_out += lnet->flight_size;
686 	}
687 	if (tot_out != stcb->asoc.total_flight) {
688 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
689 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
690 		sctp_audit_indx++;
691 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
692 			sctp_audit_indx = 0;
693 		}
694 		rep = 1;
695 		SCTP_PRINTF("real flight:%d net total was %d\n",
696 		    stcb->asoc.total_flight, tot_out);
697 		/* now corrective action */
698 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
699 
700 			tot_out = 0;
701 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
702 				if ((chk->whoTo == lnet) &&
703 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
704 					tot_out += chk->book_size;
705 				}
706 			}
707 			if (lnet->flight_size != tot_out) {
708 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
709 				    (void *)lnet, lnet->flight_size,
710 				    tot_out);
711 				lnet->flight_size = tot_out;
712 			}
713 		}
714 	}
715 	if (rep) {
716 		sctp_print_audit_report();
717 	}
718 }
719 
720 void
721 sctp_audit_log(uint8_t ev, uint8_t fd)
722 {
723 
724 	sctp_audit_data[sctp_audit_indx][0] = ev;
725 	sctp_audit_data[sctp_audit_indx][1] = fd;
726 	sctp_audit_indx++;
727 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
728 		sctp_audit_indx = 0;
729 	}
730 }
731 
732 #endif
733 
734 /*
735  * sctp_stop_timers_for_shutdown() should be called
736  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
737  * state to make sure that all timers are stopped.
738  */
739 void
740 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
741 {
742 	struct sctp_association *asoc;
743 	struct sctp_nets *net;
744 
745 	asoc = &stcb->asoc;
746 
747 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
748 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
749 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
750 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
751 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
752 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
753 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
754 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
755 	}
756 }
757 
758 /*
759  * A list of sizes based on typical MTUs, used only if the next hop's MTU is
760  * not returned.
761  */
762 static uint32_t sctp_mtu_sizes[] = {
763 	68,
764 	296,
765 	508,
766 	512,
767 	544,
768 	576,
769 	1006,
770 	1492,
771 	1500,
772 	1536,
773 	2002,
774 	2048,
775 	4352,
776 	4464,
777 	8166,
778 	17914,
779 	32000,
780 	65535
781 };
782 
783 /*
784  * Return the largest MTU smaller than val. If there is no
785  * entry, just return val.
786  */
787 uint32_t
788 sctp_get_prev_mtu(uint32_t val)
789 {
790 	uint32_t i;
791 
792 	if (val <= sctp_mtu_sizes[0]) {
793 		return (val);
794 	}
795 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
796 		if (val <= sctp_mtu_sizes[i]) {
797 			break;
798 		}
799 	}
800 	return (sctp_mtu_sizes[i - 1]);
801 }
802 
803 /*
804  * Return the smallest MTU larger than val. If there is no
805  * entry, just return val.
806  */
807 uint32_t
808 sctp_get_next_mtu(uint32_t val)
809 {
810 	/* select another MTU that is just bigger than this one */
811 	uint32_t i;
812 
813 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
814 		if (val < sctp_mtu_sizes[i]) {
815 			return (sctp_mtu_sizes[i]);
816 		}
817 	}
818 	return (val);
819 }
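
/*
 * Illustrative sketch (not compiled): how the table above is consulted.
 * sctp_get_prev_mtu() returns the largest table entry below its argument and
 * sctp_get_next_mtu() the smallest entry strictly above it; values outside
 * the table are returned unchanged. The helper name below is hypothetical
 * and exists only for this example.
 */
#if 0
static void
sctp_mtu_table_example(void)
{
	uint32_t mtu;

	mtu = sctp_get_prev_mtu(1400);	/* 1006: largest entry below 1400 */
	mtu = sctp_get_next_mtu(1400);	/* 1492: smallest entry above 1400 */
	mtu = sctp_get_prev_mtu(1500);	/* 1492: exact matches step down */
	mtu = sctp_get_next_mtu(65535);	/* 65535: no larger entry exists */
	(void)mtu;
}
#endif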
820 
821 void
822 sctp_fill_random_store(struct sctp_pcb *m)
823 {
824 	/*
825 	 * Here we use MD5/SHA-1 to hash our good random numbers together with
826 	 * our counter. The result becomes our new batch of good random numbers
827 	 * and we then set up to hand these out. Note that we do no locking to
828 	 * protect this. This is OK, since if competing callers race here we
829 	 * will just get more gobbledygook in the random store, which is what
830 	 * we want. There is a danger that two callers will use the same random
831 	 * numbers, but that's OK too since that is random as well :->
832 	 */
833 	m->store_at = 0;
834 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
835 	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
836 	    sizeof(m->random_counter), (uint8_t *)m->random_store);
837 	m->random_counter++;
838 }
839 
840 uint32_t
841 sctp_select_initial_TSN(struct sctp_pcb *inp)
842 {
843 	/*
844 	 * A true implementation should use a random selection process to get
845 	 * the initial stream sequence number, using RFC 1750 as a good
846 	 * guideline.
847 	 */
848 	uint32_t x, *xp;
849 	uint8_t *p;
850 	int store_at, new_store;
851 
852 	if (inp->initial_sequence_debug != 0) {
853 		uint32_t ret;
854 
855 		ret = inp->initial_sequence_debug;
856 		inp->initial_sequence_debug++;
857 		return (ret);
858 	}
859 retry:
860 	store_at = inp->store_at;
861 	new_store = store_at + sizeof(uint32_t);
862 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
863 		new_store = 0;
864 	}
865 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
866 		goto retry;
867 	}
868 	if (new_store == 0) {
869 		/* Refill the random store */
870 		sctp_fill_random_store(inp);
871 	}
872 	p = &inp->random_store[store_at];
873 	xp = (uint32_t *)p;
874 	x = *xp;
875 	return (x);
876 }
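
/*
 * Illustrative sketch (not compiled): the random store above is consumed in
 * 32-bit slices and refilled via sctp_fill_random_store() once the read
 * position wraps. Setting initial_sequence_debug to a non-zero value
 * bypasses the store and yields sequential values, which is handy when
 * debugging. The function name below is hypothetical.
 */
#if 0
static void
sctp_initial_tsn_example(struct sctp_inpcb *inp)
{
	uint32_t tsn;

	/* Normal case: a fresh 32-bit word from the HMAC-filled store. */
	tsn = sctp_select_initial_TSN(&inp->sctp_ep);

	/* Debug case: values simply count up from the seed. */
	inp->sctp_ep.initial_sequence_debug = 0x1000;
	tsn = sctp_select_initial_TSN(&inp->sctp_ep);	/* 0x1000 */
	tsn = sctp_select_initial_TSN(&inp->sctp_ep);	/* 0x1001 */
	(void)tsn;
}
#endif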
877 
878 uint32_t
879 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
880 {
881 	uint32_t x;
882 	struct timeval now;
883 
884 	if (check) {
885 		(void)SCTP_GETTIME_TIMEVAL(&now);
886 	}
887 	for (;;) {
888 		x = sctp_select_initial_TSN(&inp->sctp_ep);
889 		if (x == 0) {
890 			/* we never use 0 */
891 			continue;
892 		}
893 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
894 			break;
895 		}
896 	}
897 	return (x);
898 }
899 
900 int32_t
901 sctp_map_assoc_state(int kernel_state)
902 {
903 	int32_t user_state;
904 
905 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
906 		user_state = SCTP_CLOSED;
907 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
908 		user_state = SCTP_SHUTDOWN_PENDING;
909 	} else {
910 		switch (kernel_state & SCTP_STATE_MASK) {
911 		case SCTP_STATE_EMPTY:
912 			user_state = SCTP_CLOSED;
913 			break;
914 		case SCTP_STATE_INUSE:
915 			user_state = SCTP_CLOSED;
916 			break;
917 		case SCTP_STATE_COOKIE_WAIT:
918 			user_state = SCTP_COOKIE_WAIT;
919 			break;
920 		case SCTP_STATE_COOKIE_ECHOED:
921 			user_state = SCTP_COOKIE_ECHOED;
922 			break;
923 		case SCTP_STATE_OPEN:
924 			user_state = SCTP_ESTABLISHED;
925 			break;
926 		case SCTP_STATE_SHUTDOWN_SENT:
927 			user_state = SCTP_SHUTDOWN_SENT;
928 			break;
929 		case SCTP_STATE_SHUTDOWN_RECEIVED:
930 			user_state = SCTP_SHUTDOWN_RECEIVED;
931 			break;
932 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
933 			user_state = SCTP_SHUTDOWN_ACK_SENT;
934 			break;
935 		default:
936 			user_state = SCTP_CLOSED;
937 			break;
938 		}
939 	}
940 	return (user_state);
941 }
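
/*
 * Illustrative sketch (not compiled): the abort and shutdown-pending flags
 * take precedence over whatever the masked state says. The function name
 * below is hypothetical.
 */
#if 0
static void
sctp_map_assoc_state_example(void)
{
	int32_t state;

	/* SHUTDOWN_PENDING wins over the established state. */
	state = sctp_map_assoc_state(SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING);
	/* state == SCTP_SHUTDOWN_PENDING */

	/* An aborted association is always reported as closed. */
	state = sctp_map_assoc_state(SCTP_STATE_OPEN | SCTP_STATE_WAS_ABORTED);
	/* state == SCTP_CLOSED */
	(void)state;
}
#endif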
942 
943 int
944 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
945     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
946 {
947 	struct sctp_association *asoc;
948 
949 	/*
950 	 * Anything set to zero is taken care of by the allocation routine's
951 	 * bzero
952 	 */
953 
954 	/*
955 	 * Up front, select what scoping to apply on addresses I tell my peer.
956 	 * Not sure what to do with these right now; we will need to come up
957 	 * with a way to set them. We may need to pass them through from the
958 	 * caller in the sctp_aloc_assoc() function.
959 	 */
960 	int i;
961 #if defined(SCTP_DETAILED_STR_STATS)
962 	int j;
963 #endif
964 
965 	asoc = &stcb->asoc;
966 	/* init all variables to a known value. */
967 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
968 	asoc->max_burst = inp->sctp_ep.max_burst;
969 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
970 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
971 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
972 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
973 	asoc->ecn_supported = inp->ecn_supported;
974 	asoc->prsctp_supported = inp->prsctp_supported;
975 	asoc->idata_supported = inp->idata_supported;
976 	asoc->auth_supported = inp->auth_supported;
977 	asoc->asconf_supported = inp->asconf_supported;
978 	asoc->reconfig_supported = inp->reconfig_supported;
979 	asoc->nrsack_supported = inp->nrsack_supported;
980 	asoc->pktdrop_supported = inp->pktdrop_supported;
981 	asoc->idata_supported = inp->idata_supported;
982 	asoc->sctp_cmt_pf = (uint8_t)0;
983 	asoc->sctp_frag_point = inp->sctp_frag_point;
984 	asoc->sctp_features = inp->sctp_features;
985 	asoc->default_dscp = inp->sctp_ep.default_dscp;
986 	asoc->max_cwnd = inp->max_cwnd;
987 #ifdef INET6
988 	if (inp->sctp_ep.default_flowlabel) {
989 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
990 	} else {
991 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
992 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
993 			asoc->default_flowlabel &= 0x000fffff;
994 			asoc->default_flowlabel |= 0x80000000;
995 		} else {
996 			asoc->default_flowlabel = 0;
997 		}
998 	}
999 #endif
1000 	asoc->sb_send_resv = 0;
1001 	if (override_tag) {
1002 		asoc->my_vtag = override_tag;
1003 	} else {
1004 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1005 	}
1006 	/* Get the nonce tags */
1007 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1008 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1009 	asoc->vrf_id = vrf_id;
1010 
1011 #ifdef SCTP_ASOCLOG_OF_TSNS
1012 	asoc->tsn_in_at = 0;
1013 	asoc->tsn_out_at = 0;
1014 	asoc->tsn_in_wrapped = 0;
1015 	asoc->tsn_out_wrapped = 0;
1016 	asoc->cumack_log_at = 0;
1017 	asoc->cumack_log_atsnt = 0;
1018 #endif
1019 #ifdef SCTP_FS_SPEC_LOG
1020 	asoc->fs_index = 0;
1021 #endif
1022 	asoc->refcnt = 0;
1023 	asoc->assoc_up_sent = 0;
1024 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1025 	    sctp_select_initial_TSN(&inp->sctp_ep);
1026 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1027 	/* we are optimistic here */
1028 	asoc->peer_supports_nat = 0;
1029 	asoc->sent_queue_retran_cnt = 0;
1030 
1031 	/* for CMT */
1032 	asoc->last_net_cmt_send_started = NULL;
1033 
1034 	/* This will need to be adjusted */
1035 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1036 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1037 	asoc->asconf_seq_in = asoc->last_acked_seq;
1038 
1039 	/* here we are different, we hold the next one we expect */
1040 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1041 
1042 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1043 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1044 
1045 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1046 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1047 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1048 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1049 	asoc->free_chunk_cnt = 0;
1050 
1051 	asoc->iam_blocking = 0;
1052 	asoc->context = inp->sctp_context;
1053 	asoc->local_strreset_support = inp->local_strreset_support;
1054 	asoc->def_send = inp->def_send;
1055 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1056 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1057 	asoc->pr_sctp_cnt = 0;
1058 	asoc->total_output_queue_size = 0;
1059 
1060 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1061 		asoc->scope.ipv6_addr_legal = 1;
1062 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1063 			asoc->scope.ipv4_addr_legal = 1;
1064 		} else {
1065 			asoc->scope.ipv4_addr_legal = 0;
1066 		}
1067 	} else {
1068 		asoc->scope.ipv6_addr_legal = 0;
1069 		asoc->scope.ipv4_addr_legal = 1;
1070 	}
1071 
1072 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1073 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1074 
1075 	asoc->smallest_mtu = inp->sctp_frag_point;
1076 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1077 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1078 
1079 	asoc->stream_locked_on = 0;
1080 	asoc->ecn_echo_cnt_onq = 0;
1081 	asoc->stream_locked = 0;
1082 
1083 	asoc->send_sack = 1;
1084 
1085 	LIST_INIT(&asoc->sctp_restricted_addrs);
1086 
1087 	TAILQ_INIT(&asoc->nets);
1088 	TAILQ_INIT(&asoc->pending_reply_queue);
1089 	TAILQ_INIT(&asoc->asconf_ack_sent);
1090 	/* Setup to fill the hb random cache at first HB */
1091 	asoc->hb_random_idx = 4;
1092 
1093 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1094 
1095 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1096 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1097 
1098 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1099 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1100 
1101 	/*
1102 	 * Now the stream parameters, here we allocate space for all streams
1103 	 * that we request by default.
1104 	 */
1105 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1106 	    o_strms;
1107 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1108 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1109 	    SCTP_M_STRMO);
1110 	if (asoc->strmout == NULL) {
1111 		/* big trouble no memory */
1112 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1113 		return (ENOMEM);
1114 	}
1115 	for (i = 0; i < asoc->streamoutcnt; i++) {
1116 		/*
1117 		 * The inbound side must be set to 0xffff. Also NOTE that when
1118 		 * we get the INIT-ACK back (for the INIT sender) we MUST reduce
1119 		 * the count (streamoutcnt), but first check whether we sent to
1120 		 * any of the upper streams that were dropped (if some were).
1121 		 * Those that were dropped must be reported to the upper layer
1122 		 * as failed to send.
1123 		 */
1124 		asoc->strmout[i].next_mid_ordered = 0;
1125 		asoc->strmout[i].next_mid_unordered = 0;
1126 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1127 		asoc->strmout[i].chunks_on_queues = 0;
1128 #if defined(SCTP_DETAILED_STR_STATS)
1129 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1130 			asoc->strmout[i].abandoned_sent[j] = 0;
1131 			asoc->strmout[i].abandoned_unsent[j] = 0;
1132 		}
1133 #else
1134 		asoc->strmout[i].abandoned_sent[0] = 0;
1135 		asoc->strmout[i].abandoned_unsent[0] = 0;
1136 #endif
1137 		asoc->strmout[i].sid = i;
1138 		asoc->strmout[i].last_msg_incomplete = 0;
1139 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1140 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1141 	}
1142 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1143 
1144 	/* Now the mapping array */
1145 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1146 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1147 	    SCTP_M_MAP);
1148 	if (asoc->mapping_array == NULL) {
1149 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1150 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1151 		return (ENOMEM);
1152 	}
1153 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1154 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1155 	    SCTP_M_MAP);
1156 	if (asoc->nr_mapping_array == NULL) {
1157 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1158 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1159 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1160 		return (ENOMEM);
1161 	}
1162 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1163 
1164 	/* Now the init of the other outqueues */
1165 	TAILQ_INIT(&asoc->free_chunks);
1166 	TAILQ_INIT(&asoc->control_send_queue);
1167 	TAILQ_INIT(&asoc->asconf_send_queue);
1168 	TAILQ_INIT(&asoc->send_queue);
1169 	TAILQ_INIT(&asoc->sent_queue);
1170 	TAILQ_INIT(&asoc->resetHead);
1171 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1172 	TAILQ_INIT(&asoc->asconf_queue);
1173 	/* authentication fields */
1174 	asoc->authinfo.random = NULL;
1175 	asoc->authinfo.active_keyid = 0;
1176 	asoc->authinfo.assoc_key = NULL;
1177 	asoc->authinfo.assoc_keyid = 0;
1178 	asoc->authinfo.recv_key = NULL;
1179 	asoc->authinfo.recv_keyid = 0;
1180 	LIST_INIT(&asoc->shared_keys);
1181 	asoc->marked_retrans = 0;
1182 	asoc->port = inp->sctp_ep.port;
1183 	asoc->timoinit = 0;
1184 	asoc->timodata = 0;
1185 	asoc->timosack = 0;
1186 	asoc->timoshutdown = 0;
1187 	asoc->timoheartbeat = 0;
1188 	asoc->timocookie = 0;
1189 	asoc->timoshutdownack = 0;
1190 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1191 	asoc->discontinuity_time = asoc->start_time;
1192 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1193 		asoc->abandoned_unsent[i] = 0;
1194 		asoc->abandoned_sent[i] = 0;
1195 	}
1196 	/*
1197 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1198 	 * freed later when the association is freed.
1199 	 */
1200 	return (0);
1201 }
1202 
1203 void
1204 sctp_print_mapping_array(struct sctp_association *asoc)
1205 {
1206 	unsigned int i, limit;
1207 
1208 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1209 	    asoc->mapping_array_size,
1210 	    asoc->mapping_array_base_tsn,
1211 	    asoc->cumulative_tsn,
1212 	    asoc->highest_tsn_inside_map,
1213 	    asoc->highest_tsn_inside_nr_map);
1214 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1215 		if (asoc->mapping_array[limit - 1] != 0) {
1216 			break;
1217 		}
1218 	}
1219 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1220 	for (i = 0; i < limit; i++) {
1221 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1222 	}
1223 	if (limit % 16)
1224 		SCTP_PRINTF("\n");
1225 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1226 		if (asoc->nr_mapping_array[limit - 1]) {
1227 			break;
1228 		}
1229 	}
1230 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1231 	for (i = 0; i < limit; i++) {
1232 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1233 	}
1234 	if (limit % 16)
1235 		SCTP_PRINTF("\n");
1236 }
1237 
1238 int
1239 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1240 {
1241 	/* mapping array needs to grow */
1242 	uint8_t *new_array1, *new_array2;
1243 	uint32_t new_size;
1244 
1245 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1246 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1247 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1248 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1249 		/* can't get more, forget it */
1250 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1251 		if (new_array1) {
1252 			SCTP_FREE(new_array1, SCTP_M_MAP);
1253 		}
1254 		if (new_array2) {
1255 			SCTP_FREE(new_array2, SCTP_M_MAP);
1256 		}
1257 		return (-1);
1258 	}
1259 	memset(new_array1, 0, new_size);
1260 	memset(new_array2, 0, new_size);
1261 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1262 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1263 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1264 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1265 	asoc->mapping_array = new_array1;
1266 	asoc->nr_mapping_array = new_array2;
1267 	asoc->mapping_array_size = new_size;
1268 	return (0);
1269 }
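
/*
 * Worked example for the growth rule above: each TSN needs one bit, so a
 * request for `needed' additional TSNs adds (needed + 7) / 8 bytes plus a
 * cushion of SCTP_MAPPING_ARRAY_INCR bytes. For instance, with needed = 100
 * both arrays grow by 13 + SCTP_MAPPING_ARRAY_INCR bytes beyond their
 * current size (the exact cushion value comes from sctp_constants.h).
 */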
1270 
1271 
1272 static void
1273 sctp_iterator_work(struct sctp_iterator *it)
1274 {
1275 	int iteration_count = 0;
1276 	int inp_skip = 0;
1277 	int first_in = 1;
1278 	struct sctp_inpcb *tinp;
1279 
1280 	SCTP_INP_INFO_RLOCK();
1281 	SCTP_ITERATOR_LOCK();
1282 	sctp_it_ctl.cur_it = it;
1283 	if (it->inp) {
1284 		SCTP_INP_RLOCK(it->inp);
1285 		SCTP_INP_DECR_REF(it->inp);
1286 	}
1287 	if (it->inp == NULL) {
1288 		/* iterator is complete */
1289 done_with_iterator:
1290 		sctp_it_ctl.cur_it = NULL;
1291 		SCTP_ITERATOR_UNLOCK();
1292 		SCTP_INP_INFO_RUNLOCK();
1293 		if (it->function_atend != NULL) {
1294 			(*it->function_atend) (it->pointer, it->val);
1295 		}
1296 		SCTP_FREE(it, SCTP_M_ITER);
1297 		return;
1298 	}
1299 select_a_new_ep:
1300 	if (first_in) {
1301 		first_in = 0;
1302 	} else {
1303 		SCTP_INP_RLOCK(it->inp);
1304 	}
1305 	while (((it->pcb_flags) &&
1306 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1307 	    ((it->pcb_features) &&
1308 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1309 		/* endpoint flags or features don't match, so keep looking */
1310 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1311 			SCTP_INP_RUNLOCK(it->inp);
1312 			goto done_with_iterator;
1313 		}
1314 		tinp = it->inp;
1315 		it->inp = LIST_NEXT(it->inp, sctp_list);
1316 		SCTP_INP_RUNLOCK(tinp);
1317 		if (it->inp == NULL) {
1318 			goto done_with_iterator;
1319 		}
1320 		SCTP_INP_RLOCK(it->inp);
1321 	}
1322 	/* now go through each assoc which is in the desired state */
1323 	if (it->done_current_ep == 0) {
1324 		if (it->function_inp != NULL)
1325 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1326 		it->done_current_ep = 1;
1327 	}
1328 	if (it->stcb == NULL) {
1329 		/* run the per instance function */
1330 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1331 	}
1332 	if ((inp_skip) || it->stcb == NULL) {
1333 		if (it->function_inp_end != NULL) {
1334 			inp_skip = (*it->function_inp_end) (it->inp,
1335 			    it->pointer,
1336 			    it->val);
1337 		}
1338 		SCTP_INP_RUNLOCK(it->inp);
1339 		goto no_stcb;
1340 	}
1341 	while (it->stcb) {
1342 		SCTP_TCB_LOCK(it->stcb);
1343 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1344 			/* not in the right state... keep looking */
1345 			SCTP_TCB_UNLOCK(it->stcb);
1346 			goto next_assoc;
1347 		}
1348 		/* see if we have limited out the iterator loop */
1349 		iteration_count++;
1350 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1351 			/* Pause to let others grab the lock */
1352 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1353 			SCTP_TCB_UNLOCK(it->stcb);
1354 			SCTP_INP_INCR_REF(it->inp);
1355 			SCTP_INP_RUNLOCK(it->inp);
1356 			SCTP_ITERATOR_UNLOCK();
1357 			SCTP_INP_INFO_RUNLOCK();
1358 			SCTP_INP_INFO_RLOCK();
1359 			SCTP_ITERATOR_LOCK();
1360 			if (sctp_it_ctl.iterator_flags) {
1361 				/* We won't be staying here */
1362 				SCTP_INP_DECR_REF(it->inp);
1363 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1364 				if (sctp_it_ctl.iterator_flags &
1365 				    SCTP_ITERATOR_STOP_CUR_IT) {
1366 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1367 					goto done_with_iterator;
1368 				}
1369 				if (sctp_it_ctl.iterator_flags &
1370 				    SCTP_ITERATOR_STOP_CUR_INP) {
1371 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1372 					goto no_stcb;
1373 				}
1374 				/* If we reach here huh? */
1375 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1376 				    sctp_it_ctl.iterator_flags);
1377 				sctp_it_ctl.iterator_flags = 0;
1378 			}
1379 			SCTP_INP_RLOCK(it->inp);
1380 			SCTP_INP_DECR_REF(it->inp);
1381 			SCTP_TCB_LOCK(it->stcb);
1382 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1383 			iteration_count = 0;
1384 		}
1385 		/* run function on this one */
1386 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1387 
1388 		/*
1389 		 * we lie here, it really needs to have its own type but
1390 		 * first I must verify that this won't affect things :-0
1391 		 */
1392 		if (it->no_chunk_output == 0)
1393 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1394 
1395 		SCTP_TCB_UNLOCK(it->stcb);
1396 next_assoc:
1397 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1398 		if (it->stcb == NULL) {
1399 			/* Run last function */
1400 			if (it->function_inp_end != NULL) {
1401 				inp_skip = (*it->function_inp_end) (it->inp,
1402 				    it->pointer,
1403 				    it->val);
1404 			}
1405 		}
1406 	}
1407 	SCTP_INP_RUNLOCK(it->inp);
1408 no_stcb:
1409 	/* done with all assocs on this endpoint, move on to next endpoint */
1410 	it->done_current_ep = 0;
1411 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1412 		it->inp = NULL;
1413 	} else {
1414 		it->inp = LIST_NEXT(it->inp, sctp_list);
1415 	}
1416 	if (it->inp == NULL) {
1417 		goto done_with_iterator;
1418 	}
1419 	goto select_a_new_ep;
1420 }
1421 
1422 void
1423 sctp_iterator_worker(void)
1424 {
1425 	struct sctp_iterator *it, *nit;
1426 
1427 	/* This function is called with the WQ lock in place */
1428 
1429 	sctp_it_ctl.iterator_running = 1;
1430 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1431 		/* now let's work on this one */
1432 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1433 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1434 		CURVNET_SET(it->vn);
1435 		sctp_iterator_work(it);
1436 		CURVNET_RESTORE();
1437 		SCTP_IPI_ITERATOR_WQ_LOCK();
1438 		/* sa_ignore FREED_MEMORY */
1439 	}
1440 	sctp_it_ctl.iterator_running = 0;
1441 	return;
1442 }
1443 
1444 
1445 static void
1446 sctp_handle_addr_wq(void)
1447 {
1448 	/* deal with the ADDR wq from the rtsock calls */
1449 	struct sctp_laddr *wi, *nwi;
1450 	struct sctp_asconf_iterator *asc;
1451 
1452 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1453 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1454 	if (asc == NULL) {
1455 		/* Try later, no memory */
1456 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1457 		    (struct sctp_inpcb *)NULL,
1458 		    (struct sctp_tcb *)NULL,
1459 		    (struct sctp_nets *)NULL);
1460 		return;
1461 	}
1462 	LIST_INIT(&asc->list_of_work);
1463 	asc->cnt = 0;
1464 
1465 	SCTP_WQ_ADDR_LOCK();
1466 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1467 		LIST_REMOVE(wi, sctp_nxt_addr);
1468 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1469 		asc->cnt++;
1470 	}
1471 	SCTP_WQ_ADDR_UNLOCK();
1472 
1473 	if (asc->cnt == 0) {
1474 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1475 	} else {
1476 		int ret;
1477 
1478 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1479 		    sctp_asconf_iterator_stcb,
1480 		    NULL,	/* No ep end for boundall */
1481 		    SCTP_PCB_FLAGS_BOUNDALL,
1482 		    SCTP_PCB_ANY_FEATURES,
1483 		    SCTP_ASOC_ANY_STATE,
1484 		    (void *)asc, 0,
1485 		    sctp_asconf_iterator_end, NULL, 0);
1486 		if (ret) {
1487 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1488 			/*
1489 			 * Free it if we are stopping, or else put it back on
1490 			 * the addr_wq.
1491 			 */
1492 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1493 				sctp_asconf_iterator_end(asc, 0);
1494 			} else {
1495 				SCTP_WQ_ADDR_LOCK();
1496 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1497 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1498 				}
1499 				SCTP_WQ_ADDR_UNLOCK();
1500 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1501 			}
1502 		}
1503 	}
1504 }
1505 
1506 void
1507 sctp_timeout_handler(void *t)
1508 {
1509 	struct sctp_inpcb *inp;
1510 	struct sctp_tcb *stcb;
1511 	struct sctp_nets *net;
1512 	struct sctp_timer *tmr;
1513 	struct mbuf *op_err;
1514 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1515 	struct socket *so;
1516 #endif
1517 	int did_output;
1518 	int type;
1519 
1520 	tmr = (struct sctp_timer *)t;
1521 	inp = (struct sctp_inpcb *)tmr->ep;
1522 	stcb = (struct sctp_tcb *)tmr->tcb;
1523 	net = (struct sctp_nets *)tmr->net;
1524 	CURVNET_SET((struct vnet *)tmr->vnet);
1525 	did_output = 1;
1526 
1527 #ifdef SCTP_AUDITING_ENABLED
1528 	sctp_audit_log(0xF0, (uint8_t)tmr->type);
1529 	sctp_auditing(3, inp, stcb, net);
1530 #endif
1531 
1532 	/* sanity checks... */
1533 	if (tmr->self != (void *)tmr) {
1534 		/*
1535 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1536 		 * (void *)tmr);
1537 		 */
1538 		CURVNET_RESTORE();
1539 		return;
1540 	}
1541 	tmr->stopped_from = 0xa001;
1542 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1543 		/*
1544 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1545 		 * tmr->type);
1546 		 */
1547 		CURVNET_RESTORE();
1548 		return;
1549 	}
1550 	tmr->stopped_from = 0xa002;
1551 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1552 		CURVNET_RESTORE();
1553 		return;
1554 	}
1555 	/* if this is an iterator timeout, get the struct and clear inp */
1556 	tmr->stopped_from = 0xa003;
1557 	if (inp) {
1558 		SCTP_INP_INCR_REF(inp);
1559 		if ((inp->sctp_socket == NULL) &&
1560 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1561 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1562 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1563 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1564 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1565 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1566 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1567 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1568 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1569 		    ) {
1570 			SCTP_INP_DECR_REF(inp);
1571 			CURVNET_RESTORE();
1572 			return;
1573 		}
1574 	}
1575 	tmr->stopped_from = 0xa004;
1576 	if (stcb) {
1577 		atomic_add_int(&stcb->asoc.refcnt, 1);
1578 		if (stcb->asoc.state == 0) {
1579 			atomic_add_int(&stcb->asoc.refcnt, -1);
1580 			if (inp) {
1581 				SCTP_INP_DECR_REF(inp);
1582 			}
1583 			CURVNET_RESTORE();
1584 			return;
1585 		}
1586 	}
1587 	type = tmr->type;
1588 	tmr->stopped_from = 0xa005;
1589 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1590 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1591 		if (inp) {
1592 			SCTP_INP_DECR_REF(inp);
1593 		}
1594 		if (stcb) {
1595 			atomic_add_int(&stcb->asoc.refcnt, -1);
1596 		}
1597 		CURVNET_RESTORE();
1598 		return;
1599 	}
1600 	tmr->stopped_from = 0xa006;
1601 
1602 	if (stcb) {
1603 		SCTP_TCB_LOCK(stcb);
1604 		atomic_add_int(&stcb->asoc.refcnt, -1);
1605 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1606 		    ((stcb->asoc.state == 0) ||
1607 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1608 			SCTP_TCB_UNLOCK(stcb);
1609 			if (inp) {
1610 				SCTP_INP_DECR_REF(inp);
1611 			}
1612 			CURVNET_RESTORE();
1613 			return;
1614 		}
1615 	}
1616 	/* record in stopped_from which timeout occurred */
1617 	tmr->stopped_from = type;
1618 
1619 	/* mark as being serviced now */
1620 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1621 		/*
1622 		 * Callout has been rescheduled.
1623 		 */
1624 		goto get_out;
1625 	}
1626 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1627 		/*
1628 		 * Not active, so no action.
1629 		 */
1630 		goto get_out;
1631 	}
1632 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1633 
1634 	/* call the handler for the appropriate timer type */
1635 	switch (type) {
1636 	case SCTP_TIMER_TYPE_ZERO_COPY:
1637 		if (inp == NULL) {
1638 			break;
1639 		}
1640 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1641 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1642 		}
1643 		break;
1644 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1645 		if (inp == NULL) {
1646 			break;
1647 		}
1648 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1649 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1650 		}
1651 		break;
1652 	case SCTP_TIMER_TYPE_ADDR_WQ:
1653 		sctp_handle_addr_wq();
1654 		break;
1655 	case SCTP_TIMER_TYPE_SEND:
1656 		if ((stcb == NULL) || (inp == NULL)) {
1657 			break;
1658 		}
1659 		SCTP_STAT_INCR(sctps_timodata);
1660 		stcb->asoc.timodata++;
1661 		stcb->asoc.num_send_timers_up--;
1662 		if (stcb->asoc.num_send_timers_up < 0) {
1663 			stcb->asoc.num_send_timers_up = 0;
1664 		}
1665 		SCTP_TCB_LOCK_ASSERT(stcb);
1666 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1667 			/* no need to unlock on tcb, it's gone */
1668 
1669 			goto out_decr;
1670 		}
1671 		SCTP_TCB_LOCK_ASSERT(stcb);
1672 #ifdef SCTP_AUDITING_ENABLED
1673 		sctp_auditing(4, inp, stcb, net);
1674 #endif
1675 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1676 		if ((stcb->asoc.num_send_timers_up == 0) &&
1677 		    (stcb->asoc.sent_queue_cnt > 0)) {
1678 			struct sctp_tmit_chunk *chk;
1679 
1680 			/*
1681 			 * safeguard. If there are some chunks on the sent
1682 			 * queue but no timers running, something is wrong...
1683 			 * so we start a timer on the first chunk on the sent
1684 			 * queue, on whatever net it is sent to.
1685 			 */
1686 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1687 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1688 			    chk->whoTo);
1689 		}
1690 		break;
1691 	case SCTP_TIMER_TYPE_INIT:
1692 		if ((stcb == NULL) || (inp == NULL)) {
1693 			break;
1694 		}
1695 		SCTP_STAT_INCR(sctps_timoinit);
1696 		stcb->asoc.timoinit++;
1697 		if (sctp_t1init_timer(inp, stcb, net)) {
1698 			/* no need to unlock on tcb, it's gone */
1699 			goto out_decr;
1700 		}
1701 		/* We do output but not here */
1702 		did_output = 0;
1703 		break;
1704 	case SCTP_TIMER_TYPE_RECV:
1705 		if ((stcb == NULL) || (inp == NULL)) {
1706 			break;
1707 		}
1708 		SCTP_STAT_INCR(sctps_timosack);
1709 		stcb->asoc.timosack++;
1710 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1711 #ifdef SCTP_AUDITING_ENABLED
1712 		sctp_auditing(4, inp, stcb, net);
1713 #endif
1714 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1715 		break;
1716 	case SCTP_TIMER_TYPE_SHUTDOWN:
1717 		if ((stcb == NULL) || (inp == NULL)) {
1718 			break;
1719 		}
1720 		if (sctp_shutdown_timer(inp, stcb, net)) {
1721 			/* no need to unlock on tcb, it's gone */
1722 			goto out_decr;
1723 		}
1724 		SCTP_STAT_INCR(sctps_timoshutdown);
1725 		stcb->asoc.timoshutdown++;
1726 #ifdef SCTP_AUDITING_ENABLED
1727 		sctp_auditing(4, inp, stcb, net);
1728 #endif
1729 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1730 		break;
1731 	case SCTP_TIMER_TYPE_HEARTBEAT:
1732 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1733 			break;
1734 		}
1735 		SCTP_STAT_INCR(sctps_timoheartbeat);
1736 		stcb->asoc.timoheartbeat++;
1737 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1738 			/* no need to unlock on tcb, it's gone */
1739 			goto out_decr;
1740 		}
1741 #ifdef SCTP_AUDITING_ENABLED
1742 		sctp_auditing(4, inp, stcb, net);
1743 #endif
1744 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1745 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1746 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1747 		}
1748 		break;
1749 	case SCTP_TIMER_TYPE_COOKIE:
1750 		if ((stcb == NULL) || (inp == NULL)) {
1751 			break;
1752 		}
1753 		if (sctp_cookie_timer(inp, stcb, net)) {
1754 			/* no need to unlock the tcb, it's gone */
1755 			goto out_decr;
1756 		}
1757 		SCTP_STAT_INCR(sctps_timocookie);
1758 		stcb->asoc.timocookie++;
1759 #ifdef SCTP_AUDITING_ENABLED
1760 		sctp_auditing(4, inp, stcb, net);
1761 #endif
1762 		/*
1763 		 * We consider the T3 and Cookie timers pretty much the same
1764 		 * with respect to the "from" reason passed to chunk_output.
1765 		 */
1766 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1767 		break;
1768 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1769 		{
1770 			struct timeval tv;
1771 			int i, secret;
1772 
1773 			if (inp == NULL) {
1774 				break;
1775 			}
1776 			SCTP_STAT_INCR(sctps_timosecret);
1777 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1778 			SCTP_INP_WLOCK(inp);
1779 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1780 			inp->sctp_ep.last_secret_number =
1781 			    inp->sctp_ep.current_secret_number;
1782 			inp->sctp_ep.current_secret_number++;
1783 			if (inp->sctp_ep.current_secret_number >=
1784 			    SCTP_HOW_MANY_SECRETS) {
1785 				inp->sctp_ep.current_secret_number = 0;
1786 			}
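			/*
			 * Rotate the cookie secrets: the slot just left behind is
			 * remembered as last_secret_number (presumably so cookies
			 * already issued with it can still be validated), and the
			 * new current slot is refilled with fresh random words below.
			 */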
1787 			secret = (int)inp->sctp_ep.current_secret_number;
1788 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1789 				inp->sctp_ep.secret_key[secret][i] =
1790 				    sctp_select_initial_TSN(&inp->sctp_ep);
1791 			}
1792 			SCTP_INP_WUNLOCK(inp);
1793 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1794 		}
1795 		did_output = 0;
1796 		break;
1797 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1798 		if ((stcb == NULL) || (inp == NULL)) {
1799 			break;
1800 		}
1801 		SCTP_STAT_INCR(sctps_timopathmtu);
1802 		sctp_pathmtu_timer(inp, stcb, net);
1803 		did_output = 0;
1804 		break;
1805 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1806 		if ((stcb == NULL) || (inp == NULL)) {
1807 			break;
1808 		}
1809 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1810 			/* no need to unlock the tcb, it's gone */
1811 			goto out_decr;
1812 		}
1813 		SCTP_STAT_INCR(sctps_timoshutdownack);
1814 		stcb->asoc.timoshutdownack++;
1815 #ifdef SCTP_AUDITING_ENABLED
1816 		sctp_auditing(4, inp, stcb, net);
1817 #endif
1818 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1819 		break;
1820 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1821 		if ((stcb == NULL) || (inp == NULL)) {
1822 			break;
1823 		}
1824 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1825 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1826 		    "Shutdown guard timer expired");
1827 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1828 		/* no need to unlock the tcb, it's gone */
1829 		goto out_decr;
1830 
1831 	case SCTP_TIMER_TYPE_STRRESET:
1832 		if ((stcb == NULL) || (inp == NULL)) {
1833 			break;
1834 		}
1835 		if (sctp_strreset_timer(inp, stcb, net)) {
1836 			/* no need to unlock the tcb, it's gone */
1837 			goto out_decr;
1838 		}
1839 		SCTP_STAT_INCR(sctps_timostrmrst);
1840 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1841 		break;
1842 	case SCTP_TIMER_TYPE_ASCONF:
1843 		if ((stcb == NULL) || (inp == NULL)) {
1844 			break;
1845 		}
1846 		if (sctp_asconf_timer(inp, stcb, net)) {
1847 			/* no need to unlock the tcb, it's gone */
1848 			goto out_decr;
1849 		}
1850 		SCTP_STAT_INCR(sctps_timoasconf);
1851 #ifdef SCTP_AUDITING_ENABLED
1852 		sctp_auditing(4, inp, stcb, net);
1853 #endif
1854 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1855 		break;
1856 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1857 		if ((stcb == NULL) || (inp == NULL)) {
1858 			break;
1859 		}
1860 		sctp_delete_prim_timer(inp, stcb, net);
1861 		SCTP_STAT_INCR(sctps_timodelprim);
1862 		break;
1863 
1864 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1865 		if ((stcb == NULL) || (inp == NULL)) {
1866 			break;
1867 		}
1868 		SCTP_STAT_INCR(sctps_timoautoclose);
1869 		sctp_autoclose_timer(inp, stcb, net);
1870 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1871 		did_output = 0;
1872 		break;
1873 	case SCTP_TIMER_TYPE_ASOCKILL:
1874 		if ((stcb == NULL) || (inp == NULL)) {
1875 			break;
1876 		}
1877 		SCTP_STAT_INCR(sctps_timoassockill);
1878 		/* Can we free it yet? */
1879 		SCTP_INP_DECR_REF(inp);
1880 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1881 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1882 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1883 		so = SCTP_INP_SO(inp);
1884 		atomic_add_int(&stcb->asoc.refcnt, 1);
1885 		SCTP_TCB_UNLOCK(stcb);
1886 		SCTP_SOCKET_LOCK(so, 1);
1887 		SCTP_TCB_LOCK(stcb);
1888 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1889 #endif
1890 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1891 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1892 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1893 		SCTP_SOCKET_UNLOCK(so, 1);
1894 #endif
1895 		/*
1896 		 * sctp_free_assoc() always unlocks (or destroys) the tcb, so
1897 		 * prevent a duplicate unlock or an unlock of a freed mutex :-0
1898 		 */
1899 		stcb = NULL;
1900 		goto out_no_decr;
1901 	case SCTP_TIMER_TYPE_INPKILL:
1902 		SCTP_STAT_INCR(sctps_timoinpkill);
1903 		if (inp == NULL) {
1904 			break;
1905 		}
1906 		/*
1907 		 * special case, take away our increment since WE are the
1908 		 * killer
1909 		 */
1910 		SCTP_INP_DECR_REF(inp);
1911 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1912 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1913 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1914 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1915 		inp = NULL;
1916 		goto out_no_decr;
1917 	default:
1918 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1919 		    type);
1920 		break;
1921 	}
1922 #ifdef SCTP_AUDITING_ENABLED
1923 	sctp_audit_log(0xF1, (uint8_t)type);
1924 	if (inp)
1925 		sctp_auditing(5, inp, stcb, net);
1926 #endif
1927 	if ((did_output) && stcb) {
1928 		/*
1929 		 * Now we need to clean up the control chunk chain if an
1930 		 * ECNE is on it. It must be marked as UNSENT again so the
1931 		 * next call will continue to send it until we get a CWR,
1932 		 * which removes it. It is, however, unlikely that we will
1933 		 * find an ECN echo on the chain.
1934 		 */
1935 		sctp_fix_ecn_echo(&stcb->asoc);
1936 	}
1937 get_out:
1938 	if (stcb) {
1939 		SCTP_TCB_UNLOCK(stcb);
1940 	}
1941 out_decr:
1942 	if (inp) {
1943 		SCTP_INP_DECR_REF(inp);
1944 	}
1945 out_no_decr:
1946 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
1947 	CURVNET_RESTORE();
1948 }
1949 
1950 void
1951 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1952     struct sctp_nets *net)
1953 {
1954 	uint32_t to_ticks;
1955 	struct sctp_timer *tmr;
1956 
1957 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1958 		return;
1959 
1960 	tmr = NULL;
1961 	if (stcb) {
1962 		SCTP_TCB_LOCK_ASSERT(stcb);
1963 	}
1964 	switch (t_type) {
1965 	case SCTP_TIMER_TYPE_ZERO_COPY:
1966 		tmr = &inp->sctp_ep.zero_copy_timer;
1967 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1968 		break;
1969 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1970 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1971 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1972 		break;
1973 	case SCTP_TIMER_TYPE_ADDR_WQ:
1974 		/* Only 1 tick away :-) */
1975 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1976 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1977 		break;
1978 	case SCTP_TIMER_TYPE_SEND:
1979 		/* Here we use the RTO timer */
1980 		{
1981 			int rto_val;
1982 
1983 			if ((stcb == NULL) || (net == NULL)) {
1984 				return;
1985 			}
1986 			tmr = &net->rxt_timer;
1987 			if (net->RTO == 0) {
1988 				rto_val = stcb->asoc.initial_rto;
1989 			} else {
1990 				rto_val = net->RTO;
1991 			}
1992 			to_ticks = MSEC_TO_TICKS(rto_val);
1993 		}
1994 		break;
1995 	case SCTP_TIMER_TYPE_INIT:
1996 		/*
1997 		 * Here we use the INIT timer default, usually about 1
1998 		 * minute.
1999 		 */
2000 		if ((stcb == NULL) || (net == NULL)) {
2001 			return;
2002 		}
2003 		tmr = &net->rxt_timer;
2004 		if (net->RTO == 0) {
2005 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2006 		} else {
2007 			to_ticks = MSEC_TO_TICKS(net->RTO);
2008 		}
2009 		break;
2010 	case SCTP_TIMER_TYPE_RECV:
2011 		/*
2012 		 * Here we use the Delayed-Ack timer value from the inp,
2013 		 * usually about 200ms.
2014 		 */
2015 		if (stcb == NULL) {
2016 			return;
2017 		}
2018 		tmr = &stcb->asoc.dack_timer;
2019 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2020 		break;
2021 	case SCTP_TIMER_TYPE_SHUTDOWN:
2022 		/* Here we use the RTO of the destination. */
2023 		if ((stcb == NULL) || (net == NULL)) {
2024 			return;
2025 		}
2026 		if (net->RTO == 0) {
2027 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2028 		} else {
2029 			to_ticks = MSEC_TO_TICKS(net->RTO);
2030 		}
2031 		tmr = &net->rxt_timer;
2032 		break;
2033 	case SCTP_TIMER_TYPE_HEARTBEAT:
2034 		/*
2035 		 * The net is used here so that we can add in the RTO, even
2036 		 * though we use a different timer. We also add the HB
2037 		 * delay PLUS a random jitter.
2038 		 */
2039 		if ((stcb == NULL) || (net == NULL)) {
2040 			return;
2041 		} else {
2042 			uint32_t rndval;
2043 			uint32_t jitter;
2044 
2045 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2046 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2047 				return;
2048 			}
2049 			if (net->RTO == 0) {
2050 				to_ticks = stcb->asoc.initial_rto;
2051 			} else {
2052 				to_ticks = net->RTO;
2053 			}
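			/*
			 * Jitter the timeout: jitter is uniform in [0, RTO), and
			 * the two branches below map it to a value roughly in
			 * [RTO/2, 3*RTO/2), centered on the base RTO (still in ms
			 * here, converted to ticks further down).
			 */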
2054 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2055 			jitter = rndval % to_ticks;
2056 			if (jitter >= (to_ticks >> 1)) {
2057 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2058 			} else {
2059 				to_ticks = to_ticks - jitter;
2060 			}
2061 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2062 			    !(net->dest_state & SCTP_ADDR_PF)) {
2063 				to_ticks += net->heart_beat_delay;
2064 			}
2065 			/*
2066 			 * to_ticks currently holds milliseconds; convert it
2067 			 * to ticks.
2068 			 */
2069 			to_ticks = MSEC_TO_TICKS(to_ticks);
2070 			tmr = &net->hb_timer;
2071 		}
2072 		break;
2073 	case SCTP_TIMER_TYPE_COOKIE:
2074 		/*
2075 		 * Here we can use the RTO timer from the network since one
2076 		 * RTT was complete. If a retransmission happened then we
2077 		 * will be using the RTO initial value.
2078 		 */
2079 		if ((stcb == NULL) || (net == NULL)) {
2080 			return;
2081 		}
2082 		if (net->RTO == 0) {
2083 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2084 		} else {
2085 			to_ticks = MSEC_TO_TICKS(net->RTO);
2086 		}
2087 		tmr = &net->rxt_timer;
2088 		break;
2089 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2090 		/*
2091 		 * Nothing needed but the endpoint here, usually about 60
2092 		 * minutes.
2093 		 */
2094 		tmr = &inp->sctp_ep.signature_change;
2095 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2096 		break;
2097 	case SCTP_TIMER_TYPE_ASOCKILL:
2098 		if (stcb == NULL) {
2099 			return;
2100 		}
2101 		tmr = &stcb->asoc.strreset_timer;
2102 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2103 		break;
2104 	case SCTP_TIMER_TYPE_INPKILL:
2105 		/*
2106 		 * The inp is set up to die. We re-use the signature_change
2107 		 * timer since that has stopped and we are in the GONE
2108 		 * state.
2109 		 */
2110 		tmr = &inp->sctp_ep.signature_change;
2111 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2112 		break;
2113 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2114 		/*
2115 		 * Here we use the value found in the EP for PMTU, usually
2116 		 * about 10 minutes.
2117 		 */
2118 		if ((stcb == NULL) || (net == NULL)) {
2119 			return;
2120 		}
2121 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2122 			return;
2123 		}
2124 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2125 		tmr = &net->pmtu_timer;
2126 		break;
2127 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2128 		/* Here we use the RTO of the destination */
2129 		if ((stcb == NULL) || (net == NULL)) {
2130 			return;
2131 		}
2132 		if (net->RTO == 0) {
2133 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2134 		} else {
2135 			to_ticks = MSEC_TO_TICKS(net->RTO);
2136 		}
2137 		tmr = &net->rxt_timer;
2138 		break;
2139 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2140 		/*
2141 		 * Here we use the endpoint's shutdown guard timer, usually
2142 		 * about 3 minutes.
2143 		 */
2144 		if (stcb == NULL) {
2145 			return;
2146 		}
2147 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
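			/* No explicit guard value configured; default to five times the maximum RTO. */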
2148 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2149 		} else {
2150 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2151 		}
2152 		tmr = &stcb->asoc.shut_guard_timer;
2153 		break;
2154 	case SCTP_TIMER_TYPE_STRRESET:
2155 		/*
2156 		 * Here the timer comes from the stcb but its value is from
2157 		 * the net's RTO.
2158 		 */
2159 		if ((stcb == NULL) || (net == NULL)) {
2160 			return;
2161 		}
2162 		if (net->RTO == 0) {
2163 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2164 		} else {
2165 			to_ticks = MSEC_TO_TICKS(net->RTO);
2166 		}
2167 		tmr = &stcb->asoc.strreset_timer;
2168 		break;
2169 	case SCTP_TIMER_TYPE_ASCONF:
2170 		/*
2171 		 * Here the timer comes from the stcb but its value is from
2172 		 * the net's RTO.
2173 		 */
2174 		if ((stcb == NULL) || (net == NULL)) {
2175 			return;
2176 		}
2177 		if (net->RTO == 0) {
2178 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2179 		} else {
2180 			to_ticks = MSEC_TO_TICKS(net->RTO);
2181 		}
2182 		tmr = &stcb->asoc.asconf_timer;
2183 		break;
2184 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2185 		if ((stcb == NULL) || (net != NULL)) {
2186 			return;
2187 		}
2188 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2189 		tmr = &stcb->asoc.delete_prim_timer;
2190 		break;
2191 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2192 		if (stcb == NULL) {
2193 			return;
2194 		}
2195 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2196 			/*
2197 			 * Really an error since stcb is NOT set to
2198 			 * autoclose
2199 			 */
2200 			return;
2201 		}
2202 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2203 		tmr = &stcb->asoc.autoclose_timer;
2204 		break;
2205 	default:
2206 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2207 		    __func__, t_type);
2208 		return;
2209 		break;
2210 	}
2211 	if ((to_ticks <= 0) || (tmr == NULL)) {
2212 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2213 		    __func__, t_type, to_ticks, (void *)tmr);
2214 		return;
2215 	}
2216 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2217 		/*
2218 		 * We do NOT allow the timer to already be running; if it is,
2219 		 * we leave the current one up unchanged.
2220 		 */
2221 		return;
2222 	}
2223 	/* At this point we can proceed */
2224 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2225 		stcb->asoc.num_send_timers_up++;
2226 	}
2227 	tmr->stopped_from = 0;
2228 	tmr->type = t_type;
2229 	tmr->ep = (void *)inp;
2230 	tmr->tcb = (void *)stcb;
2231 	tmr->net = (void *)net;
2232 	tmr->self = (void *)tmr;
2233 	tmr->vnet = (void *)curvnet;
2234 	tmr->ticks = sctp_get_tick_count();
2235 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2236 	return;
2237 }
2238 
2239 void
2240 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2241     struct sctp_nets *net, uint32_t from)
2242 {
2243 	struct sctp_timer *tmr;
2244 
2245 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2246 	    (inp == NULL))
2247 		return;
2248 
2249 	tmr = NULL;
2250 	if (stcb) {
2251 		SCTP_TCB_LOCK_ASSERT(stcb);
2252 	}
2253 	switch (t_type) {
2254 	case SCTP_TIMER_TYPE_ZERO_COPY:
2255 		tmr = &inp->sctp_ep.zero_copy_timer;
2256 		break;
2257 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2258 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2259 		break;
2260 	case SCTP_TIMER_TYPE_ADDR_WQ:
2261 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2262 		break;
2263 	case SCTP_TIMER_TYPE_SEND:
2264 		if ((stcb == NULL) || (net == NULL)) {
2265 			return;
2266 		}
2267 		tmr = &net->rxt_timer;
2268 		break;
2269 	case SCTP_TIMER_TYPE_INIT:
2270 		if ((stcb == NULL) || (net == NULL)) {
2271 			return;
2272 		}
2273 		tmr = &net->rxt_timer;
2274 		break;
2275 	case SCTP_TIMER_TYPE_RECV:
2276 		if (stcb == NULL) {
2277 			return;
2278 		}
2279 		tmr = &stcb->asoc.dack_timer;
2280 		break;
2281 	case SCTP_TIMER_TYPE_SHUTDOWN:
2282 		if ((stcb == NULL) || (net == NULL)) {
2283 			return;
2284 		}
2285 		tmr = &net->rxt_timer;
2286 		break;
2287 	case SCTP_TIMER_TYPE_HEARTBEAT:
2288 		if ((stcb == NULL) || (net == NULL)) {
2289 			return;
2290 		}
2291 		tmr = &net->hb_timer;
2292 		break;
2293 	case SCTP_TIMER_TYPE_COOKIE:
2294 		if ((stcb == NULL) || (net == NULL)) {
2295 			return;
2296 		}
2297 		tmr = &net->rxt_timer;
2298 		break;
2299 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2300 		/* nothing needed but the endpoint here */
2301 		tmr = &inp->sctp_ep.signature_change;
2302 		/*
2303 		 * We re-use the newcookie timer for the INP kill timer. We
2304 		 * must ensure that we do not kill it by accident.
2305 		 */
2306 		break;
2307 	case SCTP_TIMER_TYPE_ASOCKILL:
2308 		/*
2309 		 * Stop the asoc kill timer.
2310 		 */
2311 		if (stcb == NULL) {
2312 			return;
2313 		}
2314 		tmr = &stcb->asoc.strreset_timer;
2315 		break;
2316 
2317 	case SCTP_TIMER_TYPE_INPKILL:
2318 		/*
2319 		 * The inp is set up to die. We re-use the signature_change
2320 		 * timer since that has stopped and we are in the GONE
2321 		 * state.
2322 		 */
2323 		tmr = &inp->sctp_ep.signature_change;
2324 		break;
2325 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2326 		if ((stcb == NULL) || (net == NULL)) {
2327 			return;
2328 		}
2329 		tmr = &net->pmtu_timer;
2330 		break;
2331 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2332 		if ((stcb == NULL) || (net == NULL)) {
2333 			return;
2334 		}
2335 		tmr = &net->rxt_timer;
2336 		break;
2337 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2338 		if (stcb == NULL) {
2339 			return;
2340 		}
2341 		tmr = &stcb->asoc.shut_guard_timer;
2342 		break;
2343 	case SCTP_TIMER_TYPE_STRRESET:
2344 		if (stcb == NULL) {
2345 			return;
2346 		}
2347 		tmr = &stcb->asoc.strreset_timer;
2348 		break;
2349 	case SCTP_TIMER_TYPE_ASCONF:
2350 		if (stcb == NULL) {
2351 			return;
2352 		}
2353 		tmr = &stcb->asoc.asconf_timer;
2354 		break;
2355 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2356 		if (stcb == NULL) {
2357 			return;
2358 		}
2359 		tmr = &stcb->asoc.delete_prim_timer;
2360 		break;
2361 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2362 		if (stcb == NULL) {
2363 			return;
2364 		}
2365 		tmr = &stcb->asoc.autoclose_timer;
2366 		break;
2367 	default:
2368 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2369 		    __func__, t_type);
2370 		break;
2371 	}
2372 	if (tmr == NULL) {
2373 		return;
2374 	}
2375 	if ((tmr->type != t_type) && tmr->type) {
2376 		/*
2377 		 * Ok, we have a timer that is under joint use, perhaps the
2378 		 * Cookie timer sharing storage with the SEND timer. We are
2379 		 * therefore NOT running the timer that the caller wants
2380 		 * stopped, so just return.
2381 		 */
2382 		return;
2383 	}
2384 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2385 		stcb->asoc.num_send_timers_up--;
2386 		if (stcb->asoc.num_send_timers_up < 0) {
2387 			stcb->asoc.num_send_timers_up = 0;
2388 		}
2389 	}
2390 	tmr->self = NULL;
2391 	tmr->stopped_from = from;
2392 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2393 	return;
2394 }
2395 
2396 uint32_t
2397 sctp_calculate_len(struct mbuf *m)
2398 {
2399 	uint32_t tlen = 0;
2400 	struct mbuf *at;
2401 
2402 	at = m;
2403 	while (at) {
2404 		tlen += SCTP_BUF_LEN(at);
2405 		at = SCTP_BUF_NEXT(at);
2406 	}
2407 	return (tlen);
2408 }
2409 
2410 void
2411 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2412     struct sctp_association *asoc, uint32_t mtu)
2413 {
2414 	/*
2415 	 * Reset the P-MTU size on this association. This involves changing
2416 	 * the asoc MTU and going through ANY chunk whose size plus overhead
2417 	 * is larger than mtu, to allow the DF flag to be cleared.
2418 	 */
2419 	struct sctp_tmit_chunk *chk;
2420 	unsigned int eff_mtu, ovh;
2421 
2422 	asoc->smallest_mtu = mtu;
2423 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2424 		ovh = SCTP_MIN_OVERHEAD;
2425 	} else {
2426 		ovh = SCTP_MIN_V4_OVERHEAD;
2427 	}
2428 	eff_mtu = mtu - ovh;
2429 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2430 		if (chk->send_size > eff_mtu) {
2431 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2432 		}
2433 	}
2434 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2435 		if (chk->send_size > eff_mtu) {
2436 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2437 		}
2438 	}
2439 }
2440 
2441 
2442 /*
2443  * Given an association and the starting time of the current RTT period,
2444  * return the RTO in msecs. net should point to the current network.
2445  */
2446 
2447 uint32_t
2448 sctp_calculate_rto(struct sctp_tcb *stcb,
2449     struct sctp_association *asoc,
2450     struct sctp_nets *net,
2451     struct timeval *told,
2452     int safe, int rtt_from_sack)
2453 {
2454 	/*-
2455 	 * Given an association and the starting time of the current RTT
2456 	 * period (*told), return the RTO in msecs.
2457 	 */
2458 	int32_t rtt;		/* RTT in ms */
2459 	uint32_t new_rto;
2460 	int first_measure = 0;
2461 	struct timeval now, then, *old;
2462 
2463 	/* Copy it out for sparc64 */
2464 	if (safe == sctp_align_unsafe_makecopy) {
2465 		old = &then;
2466 		memcpy(&then, told, sizeof(struct timeval));
2467 	} else if (safe == sctp_align_safe_nocopy) {
2468 		old = told;
2469 	} else {
2470 		/* error */
2471 		SCTP_PRINTF("Huh, bad rto calc call\n");
2472 		return (0);
2473 	}
2474 	/************************/
2475 	/* 1. calculate new RTT */
2476 	/************************/
2477 	/* get the current time */
2478 	if (stcb->asoc.use_precise_time) {
2479 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2480 	} else {
2481 		(void)SCTP_GETTIME_TIMEVAL(&now);
2482 	}
2483 	timevalsub(&now, old);
2484 	/* store the current RTT in us */
2485 	net->rtt = (uint64_t)1000000 *(uint64_t)now.tv_sec +
2486 	        (uint64_t)now.tv_usec;
2487 
2488 	/* compute rtt in ms */
2489 	rtt = (int32_t)(net->rtt / 1000);
2490 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2491 		/*
2492 		 * Tell the CC module that a new update has just occurred
2493 		 * from a sack
2494 		 */
2495 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2496 	}
2497 	/*
2498 	 * Do we need to determine the LAN type? We do this only on SACKs,
2499 	 * i.e. the RTT being determined from data, not non-data (HB/INIT->INITACK).
2500 	 */
2501 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2502 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2503 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2504 			net->lan_type = SCTP_LAN_INTERNET;
2505 		} else {
2506 			net->lan_type = SCTP_LAN_LOCAL;
2507 		}
2508 	}
2509 	/***************************/
2510 	/* 2. update RTTVAR & SRTT */
2511 	/***************************/
2512 	/*-
2513 	 * Compute the scaled average lastsa and the
2514 	 * scaled variance lastsv as described in van Jacobson
2515 	 * Paper "Congestion Avoidance and Control", Annex A.
2516 	 *
2517 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2518 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2519 	 */
2520 	if (net->RTO_measured) {
2521 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2522 		net->lastsa += rtt;
2523 		if (rtt < 0) {
2524 			rtt = -rtt;
2525 		}
2526 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2527 		net->lastsv += rtt;
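		/*
		 * With the usual shift values (SCTP_RTT_SHIFT == 3,
		 * SCTP_RTT_VAR_SHIFT == 2) the two updates above are the
		 * classic fixed-point form of srtt += (rtt - srtt) / 8 and
		 * rttvar += (|rtt - srtt| - rttvar) / 4, i.e. RFC 4960's
		 * alpha = 1/8 and beta = 1/4.
		 */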
2528 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2529 			rto_logging(net, SCTP_LOG_RTTVAR);
2530 		}
2531 	} else {
2532 		/* First RTO measurment */
2533 		/* First RTO measurement */
2534 		first_measure = 1;
2535 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2536 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2537 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2538 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2539 		}
2540 	}
2541 	if (net->lastsv == 0) {
2542 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2543 	}
2544 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
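	/*
	 * lastsv is kept scaled by 1 << SCTP_RTT_VAR_SHIFT, so adding it
	 * unshifted to the unscaled srtt yields (assuming the shifts above)
	 * RTO = SRTT + 4 * RTTVAR, as in RFC 4960 Section 6.3.1.
	 */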
2545 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2546 	    (stcb->asoc.sat_network_lockout == 0)) {
2547 		stcb->asoc.sat_network = 1;
2548 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2549 		stcb->asoc.sat_network = 0;
2550 		stcb->asoc.sat_network_lockout = 1;
2551 	}
2552 	/* bound it, per C6/C7 in Section 5.3.1 */
2553 	if (new_rto < stcb->asoc.minrto) {
2554 		new_rto = stcb->asoc.minrto;
2555 	}
2556 	if (new_rto > stcb->asoc.maxrto) {
2557 		new_rto = stcb->asoc.maxrto;
2558 	}
2559 	/* we are now returning the RTO */
2560 	return (new_rto);
2561 }
2562 
2563 /*
2564  * Return a pointer to a contiguous piece of data from the given mbuf chain
2565  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2566  * one mbuf, a copy is made at 'in_ptr'. The caller must ensure that the
2567  * buffer size is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
2568  */
2569 caddr_t
2570 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2571 {
2572 	uint32_t count;
2573 	uint8_t *ptr;
2574 
2575 	ptr = in_ptr;
2576 	if ((off < 0) || (len <= 0))
2577 		return (NULL);
2578 
2579 	/* find the desired start location */
2580 	while ((m != NULL) && (off > 0)) {
2581 		if (off < SCTP_BUF_LEN(m))
2582 			break;
2583 		off -= SCTP_BUF_LEN(m);
2584 		m = SCTP_BUF_NEXT(m);
2585 	}
2586 	if (m == NULL)
2587 		return (NULL);
2588 
2589 	/* is the current mbuf large enough (eg. contiguous)? */
2590 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2591 		return (mtod(m, caddr_t)+off);
2592 	} else {
2593 		/* else, it spans more than one mbuf, so save a temp copy... */
2594 		while ((m != NULL) && (len > 0)) {
2595 			count = min(SCTP_BUF_LEN(m) - off, len);
2596 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2597 			len -= count;
2598 			ptr += count;
2599 			off = 0;
2600 			m = SCTP_BUF_NEXT(m);
2601 		}
2602 		if ((m == NULL) && (len > 0))
2603 			return (NULL);
2604 		else
2605 			return ((caddr_t)in_ptr);
2606 	}
2607 }
2608 
2609 
2610 
2611 struct sctp_paramhdr *
2612 sctp_get_next_param(struct mbuf *m,
2613     int offset,
2614     struct sctp_paramhdr *pull,
2615     int pull_limit)
2616 {
2617 	/* This just provides a typed signature to Peter's Pull routine */
2618 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2619 	    (uint8_t *)pull));
2620 }
2621 
2622 
2623 struct mbuf *
2624 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2625 {
2626 	struct mbuf *m_last;
2627 	caddr_t dp;
2628 
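	/*
	 * SCTP chunks are padded to a 4-byte boundary, so a valid pad is
	 * at most 3 bytes; anything larger is rejected here.
	 */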
2629 	if (padlen > 3) {
2630 		return (NULL);
2631 	}
2632 	if (padlen <= M_TRAILINGSPACE(m)) {
2633 		/*
2634 		 * The easy way. We hope the majority of the time we hit
2635 		 * here :)
2636 		 */
2637 		m_last = m;
2638 	} else {
2639 		/* Hard way we must grow the mbuf chain */
2640 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2641 		if (m_last == NULL) {
2642 			return (NULL);
2643 		}
2644 		SCTP_BUF_LEN(m_last) = 0;
2645 		SCTP_BUF_NEXT(m_last) = NULL;
2646 		SCTP_BUF_NEXT(m) = m_last;
2647 	}
2648 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2649 	SCTP_BUF_LEN(m_last) += padlen;
2650 	memset(dp, 0, padlen);
2651 	return (m_last);
2652 }
2653 
2654 struct mbuf *
2655 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2656 {
2657 	/* find the last mbuf in chain and pad it */
2658 	struct mbuf *m_at;
2659 
2660 	if (last_mbuf != NULL) {
2661 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2662 	} else {
2663 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2664 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2665 				return (sctp_add_pad_tombuf(m_at, padval));
2666 			}
2667 		}
2668 	}
2669 	return (NULL);
2670 }
2671 
2672 static void
2673 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2674     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2675 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2676     SCTP_UNUSED
2677 #endif
2678 )
2679 {
2680 	struct mbuf *m_notify;
2681 	struct sctp_assoc_change *sac;
2682 	struct sctp_queued_to_read *control;
2683 	unsigned int notif_len;
2684 	uint16_t abort_len;
2685 	unsigned int i;
2686 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2687 	struct socket *so;
2688 #endif
2689 
2690 	if (stcb == NULL) {
2691 		return;
2692 	}
2693 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2694 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2695 		if (abort != NULL) {
2696 			abort_len = ntohs(abort->ch.chunk_length);
2697 		} else {
2698 			abort_len = 0;
2699 		}
2700 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2701 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2702 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2703 			notif_len += abort_len;
2704 		}
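		/*
		 * The extra room is for the supported-feature bytes or the
		 * raw ABORT chunk that get appended to sac_info below.
		 */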
2705 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2706 		if (m_notify == NULL) {
2707 			/* Retry with smaller value. */
2708 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2709 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2710 			if (m_notify == NULL) {
2711 				goto set_error;
2712 			}
2713 		}
2714 		SCTP_BUF_NEXT(m_notify) = NULL;
2715 		sac = mtod(m_notify, struct sctp_assoc_change *);
2716 		memset(sac, 0, notif_len);
2717 		sac->sac_type = SCTP_ASSOC_CHANGE;
2718 		sac->sac_flags = 0;
2719 		sac->sac_length = sizeof(struct sctp_assoc_change);
2720 		sac->sac_state = state;
2721 		sac->sac_error = error;
2722 		/* XXX verify these stream counts */
2723 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2724 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2725 		sac->sac_assoc_id = sctp_get_associd(stcb);
2726 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2727 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2728 				i = 0;
2729 				if (stcb->asoc.prsctp_supported == 1) {
2730 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2731 				}
2732 				if (stcb->asoc.auth_supported == 1) {
2733 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2734 				}
2735 				if (stcb->asoc.asconf_supported == 1) {
2736 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2737 				}
2738 				if (stcb->asoc.idata_supported == 1) {
2739 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2740 				}
2741 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2742 				if (stcb->asoc.reconfig_supported == 1) {
2743 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2744 				}
2745 				sac->sac_length += i;
2746 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2747 				memcpy(sac->sac_info, abort, abort_len);
2748 				sac->sac_length += abort_len;
2749 			}
2750 		}
2751 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2752 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2753 		    0, 0, stcb->asoc.context, 0, 0, 0,
2754 		    m_notify);
2755 		if (control != NULL) {
2756 			control->length = SCTP_BUF_LEN(m_notify);
2757 			/* not that we need this */
2758 			control->tail_mbuf = m_notify;
2759 			control->spec_flags = M_NOTIFICATION;
2760 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2761 			    control,
2762 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2763 			    so_locked);
2764 		} else {
2765 			sctp_m_freem(m_notify);
2766 		}
2767 	}
2768 	/*
2769 	 * For 1-to-1 style sockets, we send up an error when an ABORT
2770 	 * comes in.
2771 	 */
2772 set_error:
2773 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2774 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2775 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2776 		SOCK_LOCK(stcb->sctp_socket);
2777 		if (from_peer) {
2778 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2779 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2780 				stcb->sctp_socket->so_error = ECONNREFUSED;
2781 			} else {
2782 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2783 				stcb->sctp_socket->so_error = ECONNRESET;
2784 			}
2785 		} else {
2786 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2787 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2788 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2789 				stcb->sctp_socket->so_error = ETIMEDOUT;
2790 			} else {
2791 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2792 				stcb->sctp_socket->so_error = ECONNABORTED;
2793 			}
2794 		}
2795 	}
2796 	/* Wake ANY sleepers */
2797 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2798 	so = SCTP_INP_SO(stcb->sctp_ep);
2799 	if (!so_locked) {
2800 		atomic_add_int(&stcb->asoc.refcnt, 1);
2801 		SCTP_TCB_UNLOCK(stcb);
2802 		SCTP_SOCKET_LOCK(so, 1);
2803 		SCTP_TCB_LOCK(stcb);
2804 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2805 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2806 			SCTP_SOCKET_UNLOCK(so, 1);
2807 			return;
2808 		}
2809 	}
2810 #endif
2811 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2812 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2813 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2814 		socantrcvmore_locked(stcb->sctp_socket);
2815 	}
2816 	sorwakeup(stcb->sctp_socket);
2817 	sowwakeup(stcb->sctp_socket);
2818 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2819 	if (!so_locked) {
2820 		SCTP_SOCKET_UNLOCK(so, 1);
2821 	}
2822 #endif
2823 }
2824 
2825 static void
2826 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2827     struct sockaddr *sa, uint32_t error, int so_locked
2828 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2829     SCTP_UNUSED
2830 #endif
2831 )
2832 {
2833 	struct mbuf *m_notify;
2834 	struct sctp_paddr_change *spc;
2835 	struct sctp_queued_to_read *control;
2836 
2837 	if ((stcb == NULL) ||
2838 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2839 		/* event not enabled */
2840 		return;
2841 	}
2842 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2843 	if (m_notify == NULL)
2844 		return;
2845 	SCTP_BUF_LEN(m_notify) = 0;
2846 	spc = mtod(m_notify, struct sctp_paddr_change *);
2847 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2848 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2849 	spc->spc_flags = 0;
2850 	spc->spc_length = sizeof(struct sctp_paddr_change);
2851 	switch (sa->sa_family) {
2852 #ifdef INET
2853 	case AF_INET:
2854 #ifdef INET6
2855 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2856 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2857 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2858 		} else {
2859 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2860 		}
2861 #else
2862 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2863 #endif
2864 		break;
2865 #endif
2866 #ifdef INET6
2867 	case AF_INET6:
2868 		{
2869 			struct sockaddr_in6 *sin6;
2870 
2871 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2872 
2873 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2874 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2875 				if (sin6->sin6_scope_id == 0) {
2876 					/* recover scope_id for user */
2877 					(void)sa6_recoverscope(sin6);
2878 				} else {
2879 					/* clear embedded scope_id for user */
2880 					in6_clearscope(&sin6->sin6_addr);
2881 				}
2882 			}
2883 			break;
2884 		}
2885 #endif
2886 	default:
2887 		/* TSNH */
2888 		break;
2889 	}
2890 	spc->spc_state = state;
2891 	spc->spc_error = error;
2892 	spc->spc_assoc_id = sctp_get_associd(stcb);
2893 
2894 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2895 	SCTP_BUF_NEXT(m_notify) = NULL;
2896 
2897 	/* append to socket */
2898 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2899 	    0, 0, stcb->asoc.context, 0, 0, 0,
2900 	    m_notify);
2901 	if (control == NULL) {
2902 		/* no memory */
2903 		sctp_m_freem(m_notify);
2904 		return;
2905 	}
2906 	control->length = SCTP_BUF_LEN(m_notify);
2907 	control->spec_flags = M_NOTIFICATION;
2908 	/* not that we need this */
2909 	control->tail_mbuf = m_notify;
2910 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2911 	    control,
2912 	    &stcb->sctp_socket->so_rcv, 1,
2913 	    SCTP_READ_LOCK_NOT_HELD,
2914 	    so_locked);
2915 }
2916 
2917 
2918 static void
2919 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2920     struct sctp_tmit_chunk *chk, int so_locked
2921 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2922     SCTP_UNUSED
2923 #endif
2924 )
2925 {
2926 	struct mbuf *m_notify;
2927 	struct sctp_send_failed *ssf;
2928 	struct sctp_send_failed_event *ssfe;
2929 	struct sctp_queued_to_read *control;
2930 	struct sctp_chunkhdr *chkhdr;
2931 	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
2932 
2933 	if ((stcb == NULL) ||
2934 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2935 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2936 		/* event not enabled */
2937 		return;
2938 	}
2939 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2940 		notifhdr_len = sizeof(struct sctp_send_failed_event);
2941 	} else {
2942 		notifhdr_len = sizeof(struct sctp_send_failed);
2943 	}
2944 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
2945 	if (m_notify == NULL)
2946 		/* no space left */
2947 		return;
2948 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
2949 	if (stcb->asoc.idata_supported) {
2950 		chkhdr_len = sizeof(struct sctp_idata_chunk);
2951 	} else {
2952 		chkhdr_len = sizeof(struct sctp_data_chunk);
2953 	}
2954 	/* Use some defaults in case we can't access the chunk header */
2955 	if (chk->send_size >= chkhdr_len) {
2956 		payload_len = chk->send_size - chkhdr_len;
2957 	} else {
2958 		payload_len = 0;
2959 	}
2960 	padding_len = 0;
2961 	if (chk->data != NULL) {
2962 		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
2963 		if (chkhdr != NULL) {
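			/*
			 * If send_size exceeds the on-wire chunk_length by less
			 * than 4 bytes, the difference is trailing padding and
			 * the payload length can be computed exactly.
			 */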
2964 			chk_len = ntohs(chkhdr->chunk_length);
2965 			if ((chk_len >= chkhdr_len) &&
2966 			    (chk->send_size >= chk_len) &&
2967 			    (chk->send_size - chk_len < 4)) {
2968 				padding_len = chk->send_size - chk_len;
2969 				payload_len = chk->send_size - chkhdr_len - padding_len;
2970 			}
2971 		}
2972 	}
2973 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2974 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2975 		memset(ssfe, 0, notifhdr_len);
2976 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2977 		if (sent) {
2978 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2979 		} else {
2980 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2981 		}
2982 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
2983 		ssfe->ssfe_error = error;
2984 		/* not exactly what the user sent in, but should be close :) */
2985 		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
2986 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2987 		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
2988 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2989 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2990 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2991 	} else {
2992 		ssf = mtod(m_notify, struct sctp_send_failed *);
2993 		memset(ssf, 0, notifhdr_len);
2994 		ssf->ssf_type = SCTP_SEND_FAILED;
2995 		if (sent) {
2996 			ssf->ssf_flags = SCTP_DATA_SENT;
2997 		} else {
2998 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2999 		}
3000 		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
3001 		ssf->ssf_error = error;
3002 		/* not exactly what the user sent in, but should be close :) */
3003 		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
3004 		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
3005 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3006 		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
3007 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
3008 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3009 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3010 	}
3011 	if (chk->data != NULL) {
3012 		/* Trim off the sctp chunk header (it should be there) */
3013 		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
3014 			m_adj(chk->data, chkhdr_len);
3015 			m_adj(chk->data, -padding_len);
3016 			sctp_mbuf_crush(chk->data);
3017 			chk->send_size -= (chkhdr_len + padding_len);
3018 		}
3019 	}
3020 	SCTP_BUF_NEXT(m_notify) = chk->data;
3021 	/* Steal off the mbuf */
3022 	chk->data = NULL;
3023 	/*
3024 	 * For this case, we check the actual socket buffer; since the assoc
3025 	 * is going away, we don't want to overfill the socket buffer for a
3026 	 * non-reader.
3027 	 */
3028 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3029 		sctp_m_freem(m_notify);
3030 		return;
3031 	}
3032 	/* append to socket */
3033 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3034 	    0, 0, stcb->asoc.context, 0, 0, 0,
3035 	    m_notify);
3036 	if (control == NULL) {
3037 		/* no memory */
3038 		sctp_m_freem(m_notify);
3039 		return;
3040 	}
3041 	control->spec_flags = M_NOTIFICATION;
3042 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3043 	    control,
3044 	    &stcb->sctp_socket->so_rcv, 1,
3045 	    SCTP_READ_LOCK_NOT_HELD,
3046 	    so_locked);
3047 }
3048 
3049 
3050 static void
3051 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3052     struct sctp_stream_queue_pending *sp, int so_locked
3053 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3054     SCTP_UNUSED
3055 #endif
3056 )
3057 {
3058 	struct mbuf *m_notify;
3059 	struct sctp_send_failed *ssf;
3060 	struct sctp_send_failed_event *ssfe;
3061 	struct sctp_queued_to_read *control;
3062 	int notifhdr_len;
3063 
3064 	if ((stcb == NULL) ||
3065 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3066 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3067 		/* event not enabled */
3068 		return;
3069 	}
3070 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3071 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3072 	} else {
3073 		notifhdr_len = sizeof(struct sctp_send_failed);
3074 	}
3075 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3076 	if (m_notify == NULL) {
3077 		/* no space left */
3078 		return;
3079 	}
3080 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3081 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3082 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3083 		memset(ssfe, 0, notifhdr_len);
3084 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3085 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3086 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3087 		ssfe->ssfe_error = error;
3088 		/* not exactly what the user sent in, but should be close :) */
3089 		ssfe->ssfe_info.snd_sid = sp->sid;
3090 		if (sp->some_taken) {
3091 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3092 		} else {
3093 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3094 		}
3095 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3096 		ssfe->ssfe_info.snd_context = sp->context;
3097 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3098 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3099 	} else {
3100 		ssf = mtod(m_notify, struct sctp_send_failed *);
3101 		memset(ssf, 0, notifhdr_len);
3102 		ssf->ssf_type = SCTP_SEND_FAILED;
3103 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3104 		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3105 		ssf->ssf_error = error;
3106 		/* not exactly what the user sent in, but should be close :) */
3107 		ssf->ssf_info.sinfo_stream = sp->sid;
3108 		ssf->ssf_info.sinfo_ssn = 0;
3109 		if (sp->some_taken) {
3110 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3111 		} else {
3112 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3113 		}
3114 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3115 		ssf->ssf_info.sinfo_context = sp->context;
3116 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3117 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3118 	}
3119 	SCTP_BUF_NEXT(m_notify) = sp->data;
3120 
3121 	/* Steal off the mbuf */
3122 	sp->data = NULL;
3123 	/*
3124 	 * For this case, we check the actual socket buffer; since the assoc
3125 	 * is going away, we don't want to overfill the socket buffer for a
3126 	 * non-reader.
3127 	 */
3128 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3129 		sctp_m_freem(m_notify);
3130 		return;
3131 	}
3132 	/* append to socket */
3133 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3134 	    0, 0, stcb->asoc.context, 0, 0, 0,
3135 	    m_notify);
3136 	if (control == NULL) {
3137 		/* no memory */
3138 		sctp_m_freem(m_notify);
3139 		return;
3140 	}
3141 	control->spec_flags = M_NOTIFICATION;
3142 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3143 	    control,
3144 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3145 }
3146 
3147 
3148 
3149 static void
3150 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3151 {
3152 	struct mbuf *m_notify;
3153 	struct sctp_adaptation_event *sai;
3154 	struct sctp_queued_to_read *control;
3155 
3156 	if ((stcb == NULL) ||
3157 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3158 		/* event not enabled */
3159 		return;
3160 	}
3161 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3162 	if (m_notify == NULL)
3163 		/* no space left */
3164 		return;
3165 	SCTP_BUF_LEN(m_notify) = 0;
3166 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3167 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3168 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3169 	sai->sai_flags = 0;
3170 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3171 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3172 	sai->sai_assoc_id = sctp_get_associd(stcb);
3173 
3174 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3175 	SCTP_BUF_NEXT(m_notify) = NULL;
3176 
3177 	/* append to socket */
3178 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3179 	    0, 0, stcb->asoc.context, 0, 0, 0,
3180 	    m_notify);
3181 	if (control == NULL) {
3182 		/* no memory */
3183 		sctp_m_freem(m_notify);
3184 		return;
3185 	}
3186 	control->length = SCTP_BUF_LEN(m_notify);
3187 	control->spec_flags = M_NOTIFICATION;
3188 	/* not that we need this */
3189 	control->tail_mbuf = m_notify;
3190 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3191 	    control,
3192 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3193 }
3194 
3195 /* This always must be called with the read-queue LOCKED in the INP */
3196 static void
3197 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3198     uint32_t val, int so_locked
3199 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3200     SCTP_UNUSED
3201 #endif
3202 )
3203 {
3204 	struct mbuf *m_notify;
3205 	struct sctp_pdapi_event *pdapi;
3206 	struct sctp_queued_to_read *control;
3207 	struct sockbuf *sb;
3208 
3209 	if ((stcb == NULL) ||
3210 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3211 		/* event not enabled */
3212 		return;
3213 	}
3214 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3215 		return;
3216 	}
3217 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3218 	if (m_notify == NULL)
3219 		/* no space left */
3220 		return;
3221 	SCTP_BUF_LEN(m_notify) = 0;
3222 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3223 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3224 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3225 	pdapi->pdapi_flags = 0;
3226 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3227 	pdapi->pdapi_indication = error;
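	/*
	 * 'val' packs the stream number in its upper 16 bits and the
	 * sequence number in the lower 16 bits.
	 */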
3228 	pdapi->pdapi_stream = (val >> 16);
3229 	pdapi->pdapi_seq = (val & 0x0000ffff);
3230 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3231 
3232 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3233 	SCTP_BUF_NEXT(m_notify) = NULL;
3234 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3235 	    0, 0, stcb->asoc.context, 0, 0, 0,
3236 	    m_notify);
3237 	if (control == NULL) {
3238 		/* no memory */
3239 		sctp_m_freem(m_notify);
3240 		return;
3241 	}
3242 	control->spec_flags = M_NOTIFICATION;
3243 	control->length = SCTP_BUF_LEN(m_notify);
3244 	/* not that we need this */
3245 	control->tail_mbuf = m_notify;
3246 	control->held_length = 0;
3247 	control->length = 0;
3248 	sb = &stcb->sctp_socket->so_rcv;
3249 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3250 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3251 	}
3252 	sctp_sballoc(stcb, sb, m_notify);
3253 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3254 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3255 	}
3256 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3257 	control->end_added = 1;
3258 	if (stcb->asoc.control_pdapi)
3259 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3260 	else {
3261 		/* we really should not see this case */
3262 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3263 	}
3264 	if (stcb->sctp_ep && stcb->sctp_socket) {
3265 		/* This should always be the case */
3266 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3267 		struct socket *so;
3268 
3269 		so = SCTP_INP_SO(stcb->sctp_ep);
3270 		if (!so_locked) {
3271 			atomic_add_int(&stcb->asoc.refcnt, 1);
3272 			SCTP_TCB_UNLOCK(stcb);
3273 			SCTP_SOCKET_LOCK(so, 1);
3274 			SCTP_TCB_LOCK(stcb);
3275 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3276 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3277 				SCTP_SOCKET_UNLOCK(so, 1);
3278 				return;
3279 			}
3280 		}
3281 #endif
3282 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3283 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3284 		if (!so_locked) {
3285 			SCTP_SOCKET_UNLOCK(so, 1);
3286 		}
3287 #endif
3288 	}
3289 }
3290 
3291 static void
3292 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3293 {
3294 	struct mbuf *m_notify;
3295 	struct sctp_shutdown_event *sse;
3296 	struct sctp_queued_to_read *control;
3297 
3298 	/*
3299 	 * For TCP model AND UDP connected sockets we will send an error up
3300 	 * when a SHUTDOWN completes.
3301 	 */
3302 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3303 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3304 		/* mark socket closed for read/write and wakeup! */
3305 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3306 		struct socket *so;
3307 
3308 		so = SCTP_INP_SO(stcb->sctp_ep);
3309 		atomic_add_int(&stcb->asoc.refcnt, 1);
3310 		SCTP_TCB_UNLOCK(stcb);
3311 		SCTP_SOCKET_LOCK(so, 1);
3312 		SCTP_TCB_LOCK(stcb);
3313 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3314 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3315 			SCTP_SOCKET_UNLOCK(so, 1);
3316 			return;
3317 		}
3318 #endif
3319 		socantsendmore(stcb->sctp_socket);
3320 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3321 		SCTP_SOCKET_UNLOCK(so, 1);
3322 #endif
3323 	}
3324 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3325 		/* event not enabled */
3326 		return;
3327 	}
3328 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3329 	if (m_notify == NULL)
3330 		/* no space left */
3331 		return;
3332 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3333 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3334 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3335 	sse->sse_flags = 0;
3336 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3337 	sse->sse_assoc_id = sctp_get_associd(stcb);
3338 
3339 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3340 	SCTP_BUF_NEXT(m_notify) = NULL;
3341 
3342 	/* append to socket */
3343 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3344 	    0, 0, stcb->asoc.context, 0, 0, 0,
3345 	    m_notify);
3346 	if (control == NULL) {
3347 		/* no memory */
3348 		sctp_m_freem(m_notify);
3349 		return;
3350 	}
3351 	control->spec_flags = M_NOTIFICATION;
3352 	control->length = SCTP_BUF_LEN(m_notify);
3353 	/* not that we need this */
3354 	control->tail_mbuf = m_notify;
3355 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3356 	    control,
3357 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3358 }
3359 
3360 static void
3361 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3362     int so_locked
3363 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3364     SCTP_UNUSED
3365 #endif
3366 )
3367 {
3368 	struct mbuf *m_notify;
3369 	struct sctp_sender_dry_event *event;
3370 	struct sctp_queued_to_read *control;
3371 
3372 	if ((stcb == NULL) ||
3373 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3374 		/* event not enabled */
3375 		return;
3376 	}
3377 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3378 	if (m_notify == NULL) {
3379 		/* no space left */
3380 		return;
3381 	}
3382 	SCTP_BUF_LEN(m_notify) = 0;
3383 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3384 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3385 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3386 	event->sender_dry_flags = 0;
3387 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3388 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3389 
3390 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3391 	SCTP_BUF_NEXT(m_notify) = NULL;
3392 
3393 	/* append to socket */
3394 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3395 	    0, 0, stcb->asoc.context, 0, 0, 0,
3396 	    m_notify);
3397 	if (control == NULL) {
3398 		/* no memory */
3399 		sctp_m_freem(m_notify);
3400 		return;
3401 	}
3402 	control->length = SCTP_BUF_LEN(m_notify);
3403 	control->spec_flags = M_NOTIFICATION;
3404 	/* not that we need this */
3405 	control->tail_mbuf = m_notify;
3406 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3407 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3408 }
3409 
3410 
3411 void
3412 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3413 {
3414 	struct mbuf *m_notify;
3415 	struct sctp_queued_to_read *control;
3416 	struct sctp_stream_change_event *stradd;
3417 
3418 	if ((stcb == NULL) ||
3419 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3420 		/* event not enabled */
3421 		return;
3422 	}
3423 	if ((stcb->asoc.peer_req_out) && flag) {
3424 		/* Peer made the request, don't tell the local user */
3425 		stcb->asoc.peer_req_out = 0;
3426 		return;
3427 	}
3428 	stcb->asoc.peer_req_out = 0;
3429 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3430 	if (m_notify == NULL)
3431 		/* no space left */
3432 		return;
3433 	SCTP_BUF_LEN(m_notify) = 0;
3434 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3435 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3436 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3437 	stradd->strchange_flags = flag;
3438 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3439 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3440 	stradd->strchange_instrms = numberin;
3441 	stradd->strchange_outstrms = numberout;
3442 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3443 	SCTP_BUF_NEXT(m_notify) = NULL;
3444 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3445 		/* no space */
3446 		sctp_m_freem(m_notify);
3447 		return;
3448 	}
3449 	/* append to socket */
3450 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3451 	    0, 0, stcb->asoc.context, 0, 0, 0,
3452 	    m_notify);
3453 	if (control == NULL) {
3454 		/* no memory */
3455 		sctp_m_freem(m_notify);
3456 		return;
3457 	}
3458 	control->spec_flags = M_NOTIFICATION;
3459 	control->length = SCTP_BUF_LEN(m_notify);
3460 	/* not that we need this */
3461 	control->tail_mbuf = m_notify;
3462 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3463 	    control,
3464 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3465 }
3466 
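/*
 * Queue an SCTP_ASSOC_RESET_EVENT notification carrying the new local
 * (sending) and remote (receiving) TSNs after an association/TSN reset.
 */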
3467 void
3468 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3469 {
3470 	struct mbuf *m_notify;
3471 	struct sctp_queued_to_read *control;
3472 	struct sctp_assoc_reset_event *strasoc;
3473 
3474 	if ((stcb == NULL) ||
3475 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3476 		/* event not enabled */
3477 		return;
3478 	}
3479 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3480 	if (m_notify == NULL)
3481 		/* no space left */
3482 		return;
3483 	SCTP_BUF_LEN(m_notify) = 0;
3484 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3485 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3486 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3487 	strasoc->assocreset_flags = flag;
3488 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3489 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3490 	strasoc->assocreset_local_tsn = sending_tsn;
3491 	strasoc->assocreset_remote_tsn = recv_tsn;
3492 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3493 	SCTP_BUF_NEXT(m_notify) = NULL;
3494 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3495 		/* no space */
3496 		sctp_m_freem(m_notify);
3497 		return;
3498 	}
3499 	/* append to socket */
3500 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3501 	    0, 0, stcb->asoc.context, 0, 0, 0,
3502 	    m_notify);
3503 	if (control == NULL) {
3504 		/* no memory */
3505 		sctp_m_freem(m_notify);
3506 		return;
3507 	}
3508 	control->spec_flags = M_NOTIFICATION;
3509 	control->length = SCTP_BUF_LEN(m_notify);
3510 	/* not that we need this */
3511 	control->tail_mbuf = m_notify;
3512 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3513 	    control,
3514 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3515 }
3516 
3517 
3518 
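/*
 * Queue an SCTP_STREAM_RESET_EVENT notification listing the affected
 * stream ids (converted to host byte order) together with the direction
 * and result flags supplied by the caller.
 */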
3519 static void
3520 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3521     int number_entries, uint16_t *list, int flag)
3522 {
3523 	struct mbuf *m_notify;
3524 	struct sctp_queued_to_read *control;
3525 	struct sctp_stream_reset_event *strreset;
3526 	int len;
3527 
3528 	if ((stcb == NULL) ||
3529 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3530 		/* event not enabled */
3531 		return;
3532 	}
3533 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3534 	if (m_notify == NULL)
3535 		/* no space left */
3536 		return;
3537 	SCTP_BUF_LEN(m_notify) = 0;
3538 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3539 	if (len > M_TRAILINGSPACE(m_notify)) {
3540 		/* never enough room */
3541 		sctp_m_freem(m_notify);
3542 		return;
3543 	}
3544 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3545 	memset(strreset, 0, len);
3546 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3547 	strreset->strreset_flags = flag;
3548 	strreset->strreset_length = len;
3549 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3550 	if (number_entries) {
3551 		int i;
3552 
3553 		for (i = 0; i < number_entries; i++) {
3554 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3555 		}
3556 	}
3557 	SCTP_BUF_LEN(m_notify) = len;
3558 	SCTP_BUF_NEXT(m_notify) = NULL;
3559 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3560 		/* no space */
3561 		sctp_m_freem(m_notify);
3562 		return;
3563 	}
3564 	/* append to socket */
3565 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3566 	    0, 0, stcb->asoc.context, 0, 0, 0,
3567 	    m_notify);
3568 	if (control == NULL) {
3569 		/* no memory */
3570 		sctp_m_freem(m_notify);
3571 		return;
3572 	}
3573 	control->spec_flags = M_NOTIFICATION;
3574 	control->length = SCTP_BUF_LEN(m_notify);
3575 	/* not that we need this */
3576 	control->tail_mbuf = m_notify;
3577 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3578 	    control,
3579 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3580 }
3581 
3582 
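/*
 * Queue an SCTP_REMOTE_ERROR notification for an ERROR chunk received
 * from the peer.  The offending chunk is copied into the notification
 * when a large enough mbuf is available; otherwise only the fixed-size
 * header is delivered.
 */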
3583 static void
3584 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3585 {
3586 	struct mbuf *m_notify;
3587 	struct sctp_remote_error *sre;
3588 	struct sctp_queued_to_read *control;
3589 	unsigned int notif_len;
3590 	uint16_t chunk_len;
3591 
3592 	if ((stcb == NULL) ||
3593 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3594 		return;
3595 	}
3596 	if (chunk != NULL) {
3597 		chunk_len = ntohs(chunk->ch.chunk_length);
3598 	} else {
3599 		chunk_len = 0;
3600 	}
3601 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3602 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3603 	if (m_notify == NULL) {
3604 		/* Retry with smaller value. */
3605 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3606 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3607 		if (m_notify == NULL) {
3608 			return;
3609 		}
3610 	}
3611 	SCTP_BUF_NEXT(m_notify) = NULL;
3612 	sre = mtod(m_notify, struct sctp_remote_error *);
3613 	memset(sre, 0, notif_len);
3614 	sre->sre_type = SCTP_REMOTE_ERROR;
3615 	sre->sre_flags = 0;
3616 	sre->sre_length = sizeof(struct sctp_remote_error);
3617 	sre->sre_error = error;
3618 	sre->sre_assoc_id = sctp_get_associd(stcb);
3619 	if (notif_len > sizeof(struct sctp_remote_error)) {
3620 		memcpy(sre->sre_data, chunk, chunk_len);
3621 		sre->sre_length += chunk_len;
3622 	}
3623 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3624 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3625 	    0, 0, stcb->asoc.context, 0, 0, 0,
3626 	    m_notify);
3627 	if (control != NULL) {
3628 		control->length = SCTP_BUF_LEN(m_notify);
3629 		/* not that we need this */
3630 		control->tail_mbuf = m_notify;
3631 		control->spec_flags = M_NOTIFICATION;
3632 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3633 		    control,
3634 		    &stcb->sctp_socket->so_rcv, 1,
3635 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3636 	} else {
3637 		sctp_m_freem(m_notify);
3638 	}
3639 }
3640 
3641 
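/*
 * Central dispatcher for upper-layer notifications: maps an SCTP_NOTIFY_*
 * code to the specific event builder above.  It bails out early if the
 * socket is gone or can no longer receive, and suppresses interface
 * up/down/confirmed events while still in COOKIE_WAIT/COOKIE_ECHOED.
 */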
3642 void
3643 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3644     uint32_t error, void *data, int so_locked
3645 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3646     SCTP_UNUSED
3647 #endif
3648 )
3649 {
3650 	if ((stcb == NULL) ||
3651 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3652 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3653 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3654 		/* If the socket is gone we are out of here */
3655 		return;
3656 	}
3657 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3658 		return;
3659 	}
3660 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3661 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3662 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3663 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3664 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3665 			/* Don't report these in front states */
3666 			return;
3667 		}
3668 	}
3669 	switch (notification) {
3670 	case SCTP_NOTIFY_ASSOC_UP:
3671 		if (stcb->asoc.assoc_up_sent == 0) {
3672 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3673 			stcb->asoc.assoc_up_sent = 1;
3674 		}
3675 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3676 			sctp_notify_adaptation_layer(stcb);
3677 		}
3678 		if (stcb->asoc.auth_supported == 0) {
3679 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3680 			    NULL, so_locked);
3681 		}
3682 		break;
3683 	case SCTP_NOTIFY_ASSOC_DOWN:
3684 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3685 		break;
3686 	case SCTP_NOTIFY_INTERFACE_DOWN:
3687 		{
3688 			struct sctp_nets *net;
3689 
3690 			net = (struct sctp_nets *)data;
3691 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3692 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3693 			break;
3694 		}
3695 	case SCTP_NOTIFY_INTERFACE_UP:
3696 		{
3697 			struct sctp_nets *net;
3698 
3699 			net = (struct sctp_nets *)data;
3700 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3701 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3702 			break;
3703 		}
3704 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3705 		{
3706 			struct sctp_nets *net;
3707 
3708 			net = (struct sctp_nets *)data;
3709 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3710 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3711 			break;
3712 		}
3713 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3714 		sctp_notify_send_failed2(stcb, error,
3715 		    (struct sctp_stream_queue_pending *)data, so_locked);
3716 		break;
3717 	case SCTP_NOTIFY_SENT_DG_FAIL:
3718 		sctp_notify_send_failed(stcb, 1, error,
3719 		    (struct sctp_tmit_chunk *)data, so_locked);
3720 		break;
3721 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3722 		sctp_notify_send_failed(stcb, 0, error,
3723 		    (struct sctp_tmit_chunk *)data, so_locked);
3724 		break;
3725 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3726 		{
3727 			uint32_t val;
3728 
3729 			val = *((uint32_t *)data);
3730 
3731 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3732 			break;
3733 		}
3734 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3735 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3736 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3737 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3738 		} else {
3739 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3740 		}
3741 		break;
3742 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3743 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3744 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3745 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3746 		} else {
3747 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3748 		}
3749 		break;
3750 	case SCTP_NOTIFY_ASSOC_RESTART:
3751 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3752 		if (stcb->asoc.auth_supported == 0) {
3753 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3754 			    NULL, so_locked);
3755 		}
3756 		break;
3757 	case SCTP_NOTIFY_STR_RESET_SEND:
3758 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
3759 		break;
3760 	case SCTP_NOTIFY_STR_RESET_RECV:
3761 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
3762 		break;
3763 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3764 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3765 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3766 		break;
3767 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3768 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3769 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3770 		break;
3771 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3772 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3773 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3774 		break;
3775 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3776 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3777 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3778 		break;
3779 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3780 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3781 		    error, so_locked);
3782 		break;
3783 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3784 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3785 		    error, so_locked);
3786 		break;
3787 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3788 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3789 		    error, so_locked);
3790 		break;
3791 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3792 		sctp_notify_shutdown_event(stcb);
3793 		break;
3794 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3795 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3796 		    (uint16_t)(uintptr_t)data,
3797 		    so_locked);
3798 		break;
3799 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3800 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3801 		    (uint16_t)(uintptr_t)data,
3802 		    so_locked);
3803 		break;
3804 	case SCTP_NOTIFY_NO_PEER_AUTH:
3805 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3806 		    (uint16_t)(uintptr_t)data,
3807 		    so_locked);
3808 		break;
3809 	case SCTP_NOTIFY_SENDER_DRY:
3810 		sctp_notify_sender_dry_event(stcb, so_locked);
3811 		break;
3812 	case SCTP_NOTIFY_REMOTE_ERROR:
3813 		sctp_notify_remote_error(stcb, error, data);
3814 		break;
3815 	default:
3816 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3817 		    __func__, notification, notification);
3818 		break;
3819 	}			/* end switch */
3820 }
3821 
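/*
 * Flush the sent queue, the send queue and every per-stream output queue,
 * raising SENT/UNSENT/SP failure notifications for anything that still
 * carries data.  The TCB send lock is taken here unless the caller
 * indicates (holds_lock) that it already owns it.
 */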
3822 void
3823 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3824 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3825     SCTP_UNUSED
3826 #endif
3827 )
3828 {
3829 	struct sctp_association *asoc;
3830 	struct sctp_stream_out *outs;
3831 	struct sctp_tmit_chunk *chk, *nchk;
3832 	struct sctp_stream_queue_pending *sp, *nsp;
3833 	int i;
3834 
3835 	if (stcb == NULL) {
3836 		return;
3837 	}
3838 	asoc = &stcb->asoc;
3839 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3840 		/* already being freed */
3841 		return;
3842 	}
3843 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3844 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3845 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3846 		return;
3847 	}
3848 	/* now go through all the gunk, freeing chunks */
3849 	if (holds_lock == 0) {
3850 		SCTP_TCB_SEND_LOCK(stcb);
3851 	}
3852 	/* sent queue SHOULD be empty */
3853 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3854 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3855 		asoc->sent_queue_cnt--;
3856 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3857 			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3858 				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3859 #ifdef INVARIANTS
3860 			} else {
3861 				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3862 #endif
3863 			}
3864 		}
3865 		if (chk->data != NULL) {
3866 			sctp_free_bufspace(stcb, asoc, chk, 1);
3867 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3868 			    error, chk, so_locked);
3869 			if (chk->data) {
3870 				sctp_m_freem(chk->data);
3871 				chk->data = NULL;
3872 			}
3873 		}
3874 		sctp_free_a_chunk(stcb, chk, so_locked);
3875 		/* sa_ignore FREED_MEMORY */
3876 	}
3877 	/* pending send queue SHOULD be empty */
3878 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3879 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3880 		asoc->send_queue_cnt--;
3881 		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3882 			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3883 #ifdef INVARIANTS
3884 		} else {
3885 			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3886 #endif
3887 		}
3888 		if (chk->data != NULL) {
3889 			sctp_free_bufspace(stcb, asoc, chk, 1);
3890 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3891 			    error, chk, so_locked);
3892 			if (chk->data) {
3893 				sctp_m_freem(chk->data);
3894 				chk->data = NULL;
3895 			}
3896 		}
3897 		sctp_free_a_chunk(stcb, chk, so_locked);
3898 		/* sa_ignore FREED_MEMORY */
3899 	}
3900 	for (i = 0; i < asoc->streamoutcnt; i++) {
3901 		/* For each stream */
3902 		outs = &asoc->strmout[i];
3903 		/* clean up any sends there */
3904 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3905 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
3906 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3907 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
3908 			sctp_free_spbufspace(stcb, asoc, sp);
3909 			if (sp->data) {
3910 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3911 				    error, (void *)sp, so_locked);
3912 				if (sp->data) {
3913 					sctp_m_freem(sp->data);
3914 					sp->data = NULL;
3915 					sp->tail_mbuf = NULL;
3916 					sp->length = 0;
3917 				}
3918 			}
3919 			if (sp->net) {
3920 				sctp_free_remote_addr(sp->net);
3921 				sp->net = NULL;
3922 			}
3923 			/* Free the chunk */
3924 			sctp_free_a_strmoq(stcb, sp, so_locked);
3925 			/* sa_ignore FREED_MEMORY */
3926 		}
3927 	}
3928 
3929 	if (holds_lock == 0) {
3930 		SCTP_TCB_SEND_UNLOCK(stcb);
3931 	}
3932 }
3933 
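/*
 * Report the loss of the association to the user: mark a TCP-style socket
 * as WAS_ABORTED, fail all outbound data and raise either a remote or a
 * local abort notification depending on from_peer.
 */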
3934 void
3935 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3936     struct sctp_abort_chunk *abort, int so_locked
3937 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3938     SCTP_UNUSED
3939 #endif
3940 )
3941 {
3942 	if (stcb == NULL) {
3943 		return;
3944 	}
3945 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3946 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3947 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3948 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3949 	}
3950 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3951 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3952 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3953 		return;
3954 	}
3955 	/* Tell them we lost the asoc */
3956 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3957 	if (from_peer) {
3958 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3959 	} else {
3960 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3961 	}
3962 }
3963 
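/*
 * Abort in response to an incoming packet: send an ABORT back to the
 * source and, if a TCB exists, notify the user, bump the abort counters
 * and free the association.
 */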
3964 void
3965 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3966     struct mbuf *m, int iphlen,
3967     struct sockaddr *src, struct sockaddr *dst,
3968     struct sctphdr *sh, struct mbuf *op_err,
3969     uint8_t mflowtype, uint32_t mflowid,
3970     uint32_t vrf_id, uint16_t port)
3971 {
3972 	uint32_t vtag;
3973 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3974 	struct socket *so;
3975 #endif
3976 
3977 	vtag = 0;
3978 	if (stcb != NULL) {
3979 		vtag = stcb->asoc.peer_vtag;
3980 		vrf_id = stcb->asoc.vrf_id;
3981 	}
3982 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
3983 	    mflowtype, mflowid, inp->fibnum,
3984 	    vrf_id, port);
3985 	if (stcb != NULL) {
3986 		/* We have a TCB to abort, send notification too */
3987 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
3988 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3989 		/* Ok, now lets free it */
3990 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3991 		so = SCTP_INP_SO(inp);
3992 		atomic_add_int(&stcb->asoc.refcnt, 1);
3993 		SCTP_TCB_UNLOCK(stcb);
3994 		SCTP_SOCKET_LOCK(so, 1);
3995 		SCTP_TCB_LOCK(stcb);
3996 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3997 #endif
3998 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3999 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4000 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4001 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4002 		}
4003 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4004 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
4005 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4006 		SCTP_SOCKET_UNLOCK(so, 1);
4007 #endif
4008 	}
4009 }
4010 #ifdef SCTP_ASOCLOG_OF_TSNS
4011 void
4012 sctp_print_out_track_log(struct sctp_tcb *stcb)
4013 {
4014 #ifdef NOSIY_PRINTS
4015 	int i;
4016 
4017 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4018 	SCTP_PRINTF("IN bound TSN log-aaa\n");
4019 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4020 		SCTP_PRINTF("None rcvd\n");
4021 		goto none_in;
4022 	}
4023 	if (stcb->asoc.tsn_in_wrapped) {
4024 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4025 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4026 			    stcb->asoc.in_tsnlog[i].tsn,
4027 			    stcb->asoc.in_tsnlog[i].strm,
4028 			    stcb->asoc.in_tsnlog[i].seq,
4029 			    stcb->asoc.in_tsnlog[i].flgs,
4030 			    stcb->asoc.in_tsnlog[i].sz);
4031 		}
4032 	}
4033 	if (stcb->asoc.tsn_in_at) {
4034 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4035 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4036 			    stcb->asoc.in_tsnlog[i].tsn,
4037 			    stcb->asoc.in_tsnlog[i].strm,
4038 			    stcb->asoc.in_tsnlog[i].seq,
4039 			    stcb->asoc.in_tsnlog[i].flgs,
4040 			    stcb->asoc.in_tsnlog[i].sz);
4041 		}
4042 	}
4043 none_in:
4044 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4045 	if ((stcb->asoc.tsn_out_at == 0) &&
4046 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4047 		SCTP_PRINTF("None sent\n");
4048 	}
4049 	if (stcb->asoc.tsn_out_wrapped) {
4050 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4051 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4052 			    stcb->asoc.out_tsnlog[i].tsn,
4053 			    stcb->asoc.out_tsnlog[i].strm,
4054 			    stcb->asoc.out_tsnlog[i].seq,
4055 			    stcb->asoc.out_tsnlog[i].flgs,
4056 			    stcb->asoc.out_tsnlog[i].sz);
4057 		}
4058 	}
4059 	if (stcb->asoc.tsn_out_at) {
4060 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4061 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4062 			    stcb->asoc.out_tsnlog[i].tsn,
4063 			    stcb->asoc.out_tsnlog[i].strm,
4064 			    stcb->asoc.out_tsnlog[i].seq,
4065 			    stcb->asoc.out_tsnlog[i].flgs,
4066 			    stcb->asoc.out_tsnlog[i].sz);
4067 		}
4068 	}
4069 #endif
4070 }
4071 #endif
4072 
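/*
 * Abort an existing association from within the stack: send an ABORT to
 * the peer, update the statistics, notify the user (unless the socket is
 * already gone) and free the association.
 */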
4073 void
4074 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4075     struct mbuf *op_err,
4076     int so_locked
4077 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4078     SCTP_UNUSED
4079 #endif
4080 )
4081 {
4082 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4083 	struct socket *so;
4084 #endif
4085 
4086 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4087 	so = SCTP_INP_SO(inp);
4088 #endif
4089 	if (stcb == NULL) {
4090 		/* Got to have a TCB */
4091 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4092 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4093 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4094 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4095 			}
4096 		}
4097 		return;
4098 	} else {
4099 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4100 	}
4101 	/* notify the peer */
4102 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4103 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4104 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4105 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4106 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4107 	}
4108 	/* notify the ulp */
4109 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4110 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4111 	}
4112 	/* now free the asoc */
4113 #ifdef SCTP_ASOCLOG_OF_TSNS
4114 	sctp_print_out_track_log(stcb);
4115 #endif
4116 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4117 	if (!so_locked) {
4118 		atomic_add_int(&stcb->asoc.refcnt, 1);
4119 		SCTP_TCB_UNLOCK(stcb);
4120 		SCTP_SOCKET_LOCK(so, 1);
4121 		SCTP_TCB_LOCK(stcb);
4122 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4123 	}
4124 #endif
4125 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4126 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4127 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4128 	if (!so_locked) {
4129 		SCTP_SOCKET_UNLOCK(so, 1);
4130 	}
4131 #endif
4132 }
4133 
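/*
 * Handle an "out of the blue" packet, i.e. one for which no association
 * exists.  Walk the chunk list: stay silent for ABORT, SHUTDOWN COMPLETE
 * and PACKET DROPPED chunks, answer SHUTDOWN ACK with SHUTDOWN COMPLETE,
 * and otherwise send an ABORT, subject to the sctp_blackhole sysctl.
 */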
4134 void
4135 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4136     struct sockaddr *src, struct sockaddr *dst,
4137     struct sctphdr *sh, struct sctp_inpcb *inp,
4138     struct mbuf *cause,
4139     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4140     uint32_t vrf_id, uint16_t port)
4141 {
4142 	struct sctp_chunkhdr *ch, chunk_buf;
4143 	unsigned int chk_length;
4144 	int contains_init_chunk;
4145 
4146 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4147 	/* Generate a TO address for future reference */
4148 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4149 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4150 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4151 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4152 		}
4153 	}
4154 	contains_init_chunk = 0;
4155 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4156 	    sizeof(*ch), (uint8_t *)&chunk_buf);
4157 	while (ch != NULL) {
4158 		chk_length = ntohs(ch->chunk_length);
4159 		if (chk_length < sizeof(*ch)) {
4160 			/* break to abort land */
4161 			break;
4162 		}
4163 		switch (ch->chunk_type) {
4164 		case SCTP_INIT:
4165 			contains_init_chunk = 1;
4166 			break;
4167 		case SCTP_PACKET_DROPPED:
4168 			/* we don't respond to pkt-dropped */
4169 			return;
4170 		case SCTP_ABORT_ASSOCIATION:
4171 			/* we don't respond with an ABORT to an ABORT */
4172 			return;
4173 		case SCTP_SHUTDOWN_COMPLETE:
4174 			/*
4175 			 * we ignore it since we are not waiting for it and
4176 			 * peer is gone
4177 			 */
4178 			return;
4179 		case SCTP_SHUTDOWN_ACK:
4180 			sctp_send_shutdown_complete2(src, dst, sh,
4181 			    mflowtype, mflowid, fibnum,
4182 			    vrf_id, port);
4183 			return;
4184 		default:
4185 			break;
4186 		}
4187 		offset += SCTP_SIZE32(chk_length);
4188 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4189 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4190 	}
4191 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4192 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4193 	    (contains_init_chunk == 0))) {
4194 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4195 		    mflowtype, mflowid, fibnum,
4196 		    vrf_id, port);
4197 	}
4198 }
4199 
4200 /*
4201  * check the inbound datagram to make sure there is not an abort inside it,
4202  * if there is return 1, else return 0.
4203  */
4204 int
4205 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4206 {
4207 	struct sctp_chunkhdr *ch;
4208 	struct sctp_init_chunk *init_chk, chunk_buf;
4209 	int offset;
4210 	unsigned int chk_length;
4211 
4212 	offset = iphlen + sizeof(struct sctphdr);
4213 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4214 	    (uint8_t *)&chunk_buf);
4215 	while (ch != NULL) {
4216 		chk_length = ntohs(ch->chunk_length);
4217 		if (chk_length < sizeof(*ch)) {
4218 			/* packet is probably corrupt */
4219 			break;
4220 		}
4221 		/* we seem to be ok, is it an abort? */
4222 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4223 			/* yep, tell them */
4224 			return (1);
4225 		}
4226 		if (ch->chunk_type == SCTP_INITIATION) {
4227 			/* need to update the Vtag */
4228 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4229 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4230 			if (init_chk != NULL) {
4231 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4232 			}
4233 		}
4234 		/* Nope, move to the next chunk */
4235 		offset += SCTP_SIZE32(chk_length);
4236 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4237 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4238 	}
4239 	return (0);
4240 }
4241 
4242 /*
4243  * currently (2/02), ifa_addr embeds scope_ids and doesn't have sin6_scope_id
4244  * set (i.e. it's 0), so create this function to compare link-local scopes
4245  */
4246 #ifdef INET6
4247 uint32_t
4248 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4249 {
4250 	struct sockaddr_in6 a, b;
4251 
4252 	/* save copies */
4253 	a = *addr1;
4254 	b = *addr2;
4255 
4256 	if (a.sin6_scope_id == 0)
4257 		if (sa6_recoverscope(&a)) {
4258 			/* can't get scope, so can't match */
4259 			return (0);
4260 		}
4261 	if (b.sin6_scope_id == 0)
4262 		if (sa6_recoverscope(&b)) {
4263 			/* can't get scope, so can't match */
4264 			return (0);
4265 		}
4266 	if (a.sin6_scope_id != b.sin6_scope_id)
4267 		return (0);
4268 
4269 	return (1);
4270 }
4271 
4272 /*
4273  * returns a sockaddr_in6 with embedded scope recovered and removed
4274  */
4275 struct sockaddr_in6 *
4276 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4277 {
4278 	/* check and strip embedded scope junk */
4279 	if (addr->sin6_family == AF_INET6) {
4280 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4281 			if (addr->sin6_scope_id == 0) {
4282 				*store = *addr;
4283 				if (!sa6_recoverscope(store)) {
4284 					/* use the recovered scope */
4285 					addr = store;
4286 				}
4287 			} else {
4288 				/* else, return the original "to" addr */
4289 				in6_clearscope(&addr->sin6_addr);
4290 			}
4291 		}
4292 	}
4293 	return (addr);
4294 }
4295 #endif
4296 
4297 /*
4298  * Are the two addresses the same?  Currently a "scopeless" check.  Returns 1
4299  * if same, 0 if not.
4300  */
4301 int
4302 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4303 {
4304 
4305 	/* must be valid */
4306 	if (sa1 == NULL || sa2 == NULL)
4307 		return (0);
4308 
4309 	/* must be the same family */
4310 	if (sa1->sa_family != sa2->sa_family)
4311 		return (0);
4312 
4313 	switch (sa1->sa_family) {
4314 #ifdef INET6
4315 	case AF_INET6:
4316 		{
4317 			/* IPv6 addresses */
4318 			struct sockaddr_in6 *sin6_1, *sin6_2;
4319 
4320 			sin6_1 = (struct sockaddr_in6 *)sa1;
4321 			sin6_2 = (struct sockaddr_in6 *)sa2;
4322 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4323 			    sin6_2));
4324 		}
4325 #endif
4326 #ifdef INET
4327 	case AF_INET:
4328 		{
4329 			/* IPv4 addresses */
4330 			struct sockaddr_in *sin_1, *sin_2;
4331 
4332 			sin_1 = (struct sockaddr_in *)sa1;
4333 			sin_2 = (struct sockaddr_in *)sa2;
4334 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4335 		}
4336 #endif
4337 	default:
4338 		/* we don't do these... */
4339 		return (0);
4340 	}
4341 }
4342 
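/*
 * Print an address (IPv4 or IPv6, with port and, for IPv6, the scope id)
 * to the console for debugging.
 */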
4343 void
4344 sctp_print_address(struct sockaddr *sa)
4345 {
4346 #ifdef INET6
4347 	char ip6buf[INET6_ADDRSTRLEN];
4348 #endif
4349 
4350 	switch (sa->sa_family) {
4351 #ifdef INET6
4352 	case AF_INET6:
4353 		{
4354 			struct sockaddr_in6 *sin6;
4355 
4356 			sin6 = (struct sockaddr_in6 *)sa;
4357 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4358 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4359 			    ntohs(sin6->sin6_port),
4360 			    sin6->sin6_scope_id);
4361 			break;
4362 		}
4363 #endif
4364 #ifdef INET
4365 	case AF_INET:
4366 		{
4367 			struct sockaddr_in *sin;
4368 			unsigned char *p;
4369 
4370 			sin = (struct sockaddr_in *)sa;
4371 			p = (unsigned char *)&sin->sin_addr;
4372 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4373 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4374 			break;
4375 		}
4376 #endif
4377 	default:
4378 		SCTP_PRINTF("?\n");
4379 		break;
4380 	}
4381 }
4382 
4383 void
4384 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4385     struct sctp_inpcb *new_inp,
4386     struct sctp_tcb *stcb,
4387     int waitflags)
4388 {
4389 	/*
4390 	 * go through our old INP and pull off any control structures that
4391 	 * belong to stcb and move them to the new inp.
4392 	 */
4393 	struct socket *old_so, *new_so;
4394 	struct sctp_queued_to_read *control, *nctl;
4395 	struct sctp_readhead tmp_queue;
4396 	struct mbuf *m;
4397 	int error = 0;
4398 
4399 	old_so = old_inp->sctp_socket;
4400 	new_so = new_inp->sctp_socket;
4401 	TAILQ_INIT(&tmp_queue);
4402 	error = sblock(&old_so->so_rcv, waitflags);
4403 	if (error) {
4404 		/*
4405 		 * Gak, can't get sblock, we have a problem. Data will be
4406 		 * left stranded... and we don't dare look at it since the
4407 		 * other thread may be reading something. Oh well, it's a
4408 		 * screwed-up app that does a peeloff OR an accept while
4409 		 * reading from the main socket... actually it's only the
4410 		 * peeloff() case, since I think read will fail on a
4411 		 * listening socket.
4412 		 */
4413 		return;
4414 	}
4415 	/* lock the socket buffers */
4416 	SCTP_INP_READ_LOCK(old_inp);
4417 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4418 		/* Pull off all for our target stcb */
4419 		if (control->stcb == stcb) {
4420 			/* remove it we want it */
4421 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4422 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4423 			m = control->data;
4424 			while (m) {
4425 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4426 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4427 				}
4428 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4429 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4430 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4431 				}
4432 				m = SCTP_BUF_NEXT(m);
4433 			}
4434 		}
4435 	}
4436 	SCTP_INP_READ_UNLOCK(old_inp);
4437 	/* Remove the sb-lock on the old socket */
4438 
4439 	sbunlock(&old_so->so_rcv);
4440 	/* Now we move them over to the new socket buffer */
4441 	SCTP_INP_READ_LOCK(new_inp);
4442 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4443 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4444 		m = control->data;
4445 		while (m) {
4446 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4447 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4448 			}
4449 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4450 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4451 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4452 			}
4453 			m = SCTP_BUF_NEXT(m);
4454 		}
4455 	}
4456 	SCTP_INP_READ_UNLOCK(new_inp);
4457 }
4458 
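/*
 * Wake up anybody sleeping on the socket's receive buffer, either through
 * the zero-copy event or a plain sorwakeup(), taking the socket lock
 * first on platforms that require it.
 */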
4459 void
4460 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4461     struct sctp_tcb *stcb,
4462     int so_locked
4463 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4464     SCTP_UNUSED
4465 #endif
4466 )
4467 {
4468 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4469 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4470 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4471 		} else {
4472 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4473 			struct socket *so;
4474 
4475 			so = SCTP_INP_SO(inp);
4476 			if (!so_locked) {
4477 				if (stcb) {
4478 					atomic_add_int(&stcb->asoc.refcnt, 1);
4479 					SCTP_TCB_UNLOCK(stcb);
4480 				}
4481 				SCTP_SOCKET_LOCK(so, 1);
4482 				if (stcb) {
4483 					SCTP_TCB_LOCK(stcb);
4484 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4485 				}
4486 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4487 					SCTP_SOCKET_UNLOCK(so, 1);
4488 					return;
4489 				}
4490 			}
4491 #endif
4492 			sctp_sorwakeup(inp, inp->sctp_socket);
4493 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4494 			if (!so_locked) {
4495 				SCTP_SOCKET_UNLOCK(so, 1);
4496 			}
4497 #endif
4498 		}
4499 	}
4500 }
4501 
4502 void
4503 sctp_add_to_readq(struct sctp_inpcb *inp,
4504     struct sctp_tcb *stcb,
4505     struct sctp_queued_to_read *control,
4506     struct sockbuf *sb,
4507     int end,
4508     int inp_read_lock_held,
4509     int so_locked
4510 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4511     SCTP_UNUSED
4512 #endif
4513 )
4514 {
4515 	/*
4516 	 * Here we must place the control on the end of the socket read
4517 	 * queue AND increment sb_cc so that select will work properly on
4518 	 * read.
4519 	 */
4520 	struct mbuf *m, *prev = NULL;
4521 
4522 	if (inp == NULL) {
4523 		/* Gak, TSNH!! */
4524 #ifdef INVARIANTS
4525 		panic("Gak, inp NULL on add_to_readq");
4526 #endif
4527 		return;
4528 	}
4529 	if (inp_read_lock_held == 0)
4530 		SCTP_INP_READ_LOCK(inp);
4531 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4532 		sctp_free_remote_addr(control->whoFrom);
4533 		if (control->data) {
4534 			sctp_m_freem(control->data);
4535 			control->data = NULL;
4536 		}
4537 		sctp_free_a_readq(stcb, control);
4538 		if (inp_read_lock_held == 0)
4539 			SCTP_INP_READ_UNLOCK(inp);
4540 		return;
4541 	}
4542 	if (!(control->spec_flags & M_NOTIFICATION)) {
4543 		atomic_add_int(&inp->total_recvs, 1);
4544 		if (!control->do_not_ref_stcb) {
4545 			atomic_add_int(&stcb->total_recvs, 1);
4546 		}
4547 	}
4548 	m = control->data;
4549 	control->held_length = 0;
4550 	control->length = 0;
4551 	while (m) {
4552 		if (SCTP_BUF_LEN(m) == 0) {
4553 			/* Skip mbufs with NO length */
4554 			if (prev == NULL) {
4555 				/* First one */
4556 				control->data = sctp_m_free(m);
4557 				m = control->data;
4558 			} else {
4559 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4560 				m = SCTP_BUF_NEXT(prev);
4561 			}
4562 			if (m == NULL) {
4563 				control->tail_mbuf = prev;
4564 			}
4565 			continue;
4566 		}
4567 		prev = m;
4568 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4569 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4570 		}
4571 		sctp_sballoc(stcb, sb, m);
4572 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4573 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4574 		}
4575 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4576 		m = SCTP_BUF_NEXT(m);
4577 	}
4578 	if (prev != NULL) {
4579 		control->tail_mbuf = prev;
4580 	} else {
4581 		/* Everything got collapsed out?? */
4582 		sctp_free_remote_addr(control->whoFrom);
4583 		sctp_free_a_readq(stcb, control);
4584 		if (inp_read_lock_held == 0)
4585 			SCTP_INP_READ_UNLOCK(inp);
4586 		return;
4587 	}
4588 	if (end) {
4589 		control->end_added = 1;
4590 	}
4591 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4592 	control->on_read_q = 1;
4593 	if (inp_read_lock_held == 0)
4594 		SCTP_INP_READ_UNLOCK(inp);
4595 	if (inp && inp->sctp_socket) {
4596 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
4597 	}
4598 }
4599 
4600 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4601  *************ALTERNATE ROUTING CODE
4602  */
4603 
4604 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4605  *************ALTERNATE ROUTING CODE
4606  */
4607 
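/*
 * Build an operational error cause in an mbuf: a parameter header with
 * the given cause code followed by the text in info.  Returns NULL if no
 * code or info is supplied, if info is too long, or if no mbuf is
 * available.
 */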
4608 struct mbuf *
4609 sctp_generate_cause(uint16_t code, char *info)
4610 {
4611 	struct mbuf *m;
4612 	struct sctp_gen_error_cause *cause;
4613 	size_t info_len;
4614 	uint16_t len;
4615 
4616 	if ((code == 0) || (info == NULL)) {
4617 		return (NULL);
4618 	}
4619 	info_len = strlen(info);
4620 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4621 		return (NULL);
4622 	}
4623 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4624 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4625 	if (m != NULL) {
4626 		SCTP_BUF_LEN(m) = len;
4627 		cause = mtod(m, struct sctp_gen_error_cause *);
4628 		cause->code = htons(code);
4629 		cause->length = htons(len);
4630 		memcpy(cause->info, info, info_len);
4631 	}
4632 	return (m);
4633 }
4634 
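/*
 * Build a "No User Data" error cause recording the TSN of the DATA chunk
 * that carried no payload.
 */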
4635 struct mbuf *
4636 sctp_generate_no_user_data_cause(uint32_t tsn)
4637 {
4638 	struct mbuf *m;
4639 	struct sctp_error_no_user_data *no_user_data_cause;
4640 	uint16_t len;
4641 
4642 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4643 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4644 	if (m != NULL) {
4645 		SCTP_BUF_LEN(m) = len;
4646 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4647 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4648 		no_user_data_cause->cause.length = htons(len);
4649 		no_user_data_cause->tsn = htonl(tsn);
4650 	}
4651 	return (m);
4652 }
4653 
4654 #ifdef SCTP_MBCNT_LOGGING
4655 void
4656 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4657     struct sctp_tmit_chunk *tp1, int chk_cnt)
4658 {
4659 	if (tp1->data == NULL) {
4660 		return;
4661 	}
4662 	asoc->chunks_on_out_queue -= chk_cnt;
4663 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4664 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4665 		    asoc->total_output_queue_size,
4666 		    tp1->book_size,
4667 		    0,
4668 		    tp1->mbcnt);
4669 	}
4670 	if (asoc->total_output_queue_size >= tp1->book_size) {
4671 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4672 	} else {
4673 		asoc->total_output_queue_size = 0;
4674 	}
4675 
4676 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4677 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4678 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4679 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4680 		} else {
4681 			stcb->sctp_socket->so_snd.sb_cc = 0;
4682 
4683 		}
4684 	}
4685 }
4686 
4687 #endif
4688 
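/*
 * Abandon a (possibly multi-fragment) PR-SCTP message starting at tp1:
 * update the abandoned counters, free the data and mark every fragment
 * SCTP_FORWARD_TSN_SKIP across the sent queue, the send queue and, if the
 * last fragment never left the stream queue, a synthesized chunk carrying
 * the TSN of the LAST fragment.  Returns the number of bytes released.
 */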
4689 int
4690 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4691     uint8_t sent, int so_locked
4692 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4693     SCTP_UNUSED
4694 #endif
4695 )
4696 {
4697 	struct sctp_stream_out *strq;
4698 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4699 	struct sctp_stream_queue_pending *sp;
4700 	uint32_t mid;
4701 	uint16_t sid;
4702 	uint8_t foundeom = 0;
4703 	int ret_sz = 0;
4704 	int notdone;
4705 	int do_wakeup_routine = 0;
4706 
4707 	sid = tp1->rec.data.sid;
4708 	mid = tp1->rec.data.mid;
4709 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4710 		stcb->asoc.abandoned_sent[0]++;
4711 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4712 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
4713 #if defined(SCTP_DETAILED_STR_STATS)
4714 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4715 #endif
4716 	} else {
4717 		stcb->asoc.abandoned_unsent[0]++;
4718 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4719 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4720 #if defined(SCTP_DETAILED_STR_STATS)
4721 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4722 #endif
4723 	}
4724 	do {
4725 		ret_sz += tp1->book_size;
4726 		if (tp1->data != NULL) {
4727 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4728 				sctp_flight_size_decrease(tp1);
4729 				sctp_total_flight_decrease(stcb, tp1);
4730 			}
4731 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4732 			stcb->asoc.peers_rwnd += tp1->send_size;
4733 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4734 			if (sent) {
4735 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4736 			} else {
4737 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4738 			}
4739 			if (tp1->data) {
4740 				sctp_m_freem(tp1->data);
4741 				tp1->data = NULL;
4742 			}
4743 			do_wakeup_routine = 1;
4744 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4745 				stcb->asoc.sent_queue_cnt_removeable--;
4746 			}
4747 		}
4748 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4749 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4750 		    SCTP_DATA_NOT_FRAG) {
4751 			/* not frag'ed, we are done */
4752 			notdone = 0;
4753 			foundeom = 1;
4754 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4755 			/* end of frag, we are done */
4756 			notdone = 0;
4757 			foundeom = 1;
4758 		} else {
4759 			/*
4760 			 * Its a begin or middle piece, we must mark all of
4761 			 * It's a begin or middle piece; we must mark all of
4762 			 */
4763 			notdone = 1;
4764 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4765 		}
4766 	} while (tp1 && notdone);
4767 	if (foundeom == 0) {
4768 		/*
4769 		 * The multi-part message was scattered across the send and
4770 		 * sent queue.
4771 		 */
4772 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4773 			if ((tp1->rec.data.sid != sid) ||
4774 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4775 				break;
4776 			}
4777 			/*
4778 			 * save to chk in case we have some on stream out
4779 			 * queue. If so and we have an un-transmitted one we
4780 			 * don't have to fudge the TSN.
4781 			 */
4782 			chk = tp1;
4783 			ret_sz += tp1->book_size;
4784 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4785 			if (sent) {
4786 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4787 			} else {
4788 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4789 			}
4790 			if (tp1->data) {
4791 				sctp_m_freem(tp1->data);
4792 				tp1->data = NULL;
4793 			}
4794 			/* No flight involved here; book the size to 0 */
4795 			tp1->book_size = 0;
4796 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4797 				foundeom = 1;
4798 			}
4799 			do_wakeup_routine = 1;
4800 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4801 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4802 			/*
4803 			 * on to the sent queue so we can wait for it to be
4804 			 * passed by.
4805 			 */
4806 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4807 			    sctp_next);
4808 			stcb->asoc.send_queue_cnt--;
4809 			stcb->asoc.sent_queue_cnt++;
4810 		}
4811 	}
4812 	if (foundeom == 0) {
4813 		/*
4814 		 * Still no eom found. That means there is stuff left on the
4815 		 * stream out queue.. yuck.
4816 		 */
4817 		SCTP_TCB_SEND_LOCK(stcb);
4818 		strq = &stcb->asoc.strmout[sid];
4819 		sp = TAILQ_FIRST(&strq->outqueue);
4820 		if (sp != NULL) {
4821 			sp->discard_rest = 1;
4822 			/*
4823 			 * We may need to put a chunk on the queue that
4824 			 * holds the TSN that would have been sent with the
4825 			 * LAST bit.
4826 			 */
4827 			if (chk == NULL) {
4828 				/* Yep, we have to */
4829 				sctp_alloc_a_chunk(stcb, chk);
4830 				if (chk == NULL) {
4831 					/*
4832 					 * we are hosed. All we can do is
4833 					 * nothing.. which will cause an
4834 					 * abort if the peer is paying
4835 					 * attention.
4836 					 */
4837 					goto oh_well;
4838 				}
4839 				memset(chk, 0, sizeof(*chk));
4840 				chk->rec.data.rcv_flags = 0;
4841 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4842 				chk->asoc = &stcb->asoc;
4843 				if (stcb->asoc.idata_supported == 0) {
4844 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4845 						chk->rec.data.mid = 0;
4846 					} else {
4847 						chk->rec.data.mid = strq->next_mid_ordered;
4848 					}
4849 				} else {
4850 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4851 						chk->rec.data.mid = strq->next_mid_unordered;
4852 					} else {
4853 						chk->rec.data.mid = strq->next_mid_ordered;
4854 					}
4855 				}
4856 				chk->rec.data.sid = sp->sid;
4857 				chk->rec.data.ppid = sp->ppid;
4858 				chk->rec.data.context = sp->context;
4859 				chk->flags = sp->act_flags;
4860 				chk->whoTo = NULL;
4861 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4862 				strq->chunks_on_queues++;
4863 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4864 				stcb->asoc.sent_queue_cnt++;
4865 				stcb->asoc.pr_sctp_cnt++;
4866 			}
4867 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4868 			if (sp->sinfo_flags & SCTP_UNORDERED) {
4869 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4870 			}
4871 			if (stcb->asoc.idata_supported == 0) {
4872 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4873 					strq->next_mid_ordered++;
4874 				}
4875 			} else {
4876 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4877 					strq->next_mid_unordered++;
4878 				} else {
4879 					strq->next_mid_ordered++;
4880 				}
4881 			}
4882 	oh_well:
4883 			if (sp->data) {
4884 				/*
4885 				 * Pull any data to free up the SB and allow
4886 				 * sender to "add more" while we will throw
4887 				 * away :-)
4888 				 */
4889 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4890 				ret_sz += sp->length;
4891 				do_wakeup_routine = 1;
4892 				sp->some_taken = 1;
4893 				sctp_m_freem(sp->data);
4894 				sp->data = NULL;
4895 				sp->tail_mbuf = NULL;
4896 				sp->length = 0;
4897 			}
4898 		}
4899 		SCTP_TCB_SEND_UNLOCK(stcb);
4900 	}
4901 	if (do_wakeup_routine) {
4902 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4903 		struct socket *so;
4904 
4905 		so = SCTP_INP_SO(stcb->sctp_ep);
4906 		if (!so_locked) {
4907 			atomic_add_int(&stcb->asoc.refcnt, 1);
4908 			SCTP_TCB_UNLOCK(stcb);
4909 			SCTP_SOCKET_LOCK(so, 1);
4910 			SCTP_TCB_LOCK(stcb);
4911 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4912 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4913 				/* assoc was freed while we were unlocked */
4914 				SCTP_SOCKET_UNLOCK(so, 1);
4915 				return (ret_sz);
4916 			}
4917 		}
4918 #endif
4919 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4920 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4921 		if (!so_locked) {
4922 			SCTP_SOCKET_UNLOCK(so, 1);
4923 		}
4924 #endif
4925 	}
4926 	return (ret_sz);
4927 }
4928 
4929 /*
4930  * checks to see if the given address, sa, is one that is currently known by
4931  * the kernel.  Note: can't distinguish the same address on multiple interfaces
4932  * and doesn't handle multiple addresses with different zone/scope ids.  Note:
4933  * ifa_ifwithaddr() compares the entire sockaddr struct
4934  */
4935 struct sctp_ifa *
4936 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4937     int holds_lock)
4938 {
4939 	struct sctp_laddr *laddr;
4940 
4941 	if (holds_lock == 0) {
4942 		SCTP_INP_RLOCK(inp);
4943 	}
4944 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4945 		if (laddr->ifa == NULL)
4946 			continue;
4947 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4948 			continue;
4949 #ifdef INET
4950 		if (addr->sa_family == AF_INET) {
4951 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4952 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4953 				/* found him. */
4954 				if (holds_lock == 0) {
4955 					SCTP_INP_RUNLOCK(inp);
4956 				}
4957 				return (laddr->ifa);
4958 				break;
4959 			}
4960 		}
4961 #endif
4962 #ifdef INET6
4963 		if (addr->sa_family == AF_INET6) {
4964 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4965 			    &laddr->ifa->address.sin6)) {
4966 				/* found him. */
4967 				if (holds_lock == 0) {
4968 					SCTP_INP_RUNLOCK(inp);
4969 				}
4970 				return (laddr->ifa);
4971 				break;
4972 			}
4973 		}
4974 #endif
4975 	}
4976 	if (holds_lock == 0) {
4977 		SCTP_INP_RUNLOCK(inp);
4978 	}
4979 	return (NULL);
4980 }
4981 
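/*
 * Hash an address for the per-VRF address hash table: IPv4 folds the
 * address with its upper 16 bits, IPv6 sums the four 32-bit words of the
 * address and folds the result.
 */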
4982 uint32_t
4983 sctp_get_ifa_hash_val(struct sockaddr *addr)
4984 {
4985 	switch (addr->sa_family) {
4986 #ifdef INET
4987 	case AF_INET:
4988 		{
4989 			struct sockaddr_in *sin;
4990 
4991 			sin = (struct sockaddr_in *)addr;
4992 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4993 		}
4994 #endif
4995 #ifdef INET6
4996 	case AF_INET6:
4997 		{
4998 			struct sockaddr_in6 *sin6;
4999 			uint32_t hash_of_addr;
5000 
5001 			sin6 = (struct sockaddr_in6 *)addr;
5002 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5003 			    sin6->sin6_addr.s6_addr32[1] +
5004 			    sin6->sin6_addr.s6_addr32[2] +
5005 			    sin6->sin6_addr.s6_addr32[3]);
5006 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5007 			return (hash_of_addr);
5008 		}
5009 #endif
5010 	default:
5011 		break;
5012 	}
5013 	return (0);
5014 }
5015 
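/*
 * Look up addr in the address hash table of the given VRF, taking the
 * global address read lock unless the caller already holds it.  Returns
 * the matching sctp_ifa or NULL.
 */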
5016 struct sctp_ifa *
5017 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5018 {
5019 	struct sctp_ifa *sctp_ifap;
5020 	struct sctp_vrf *vrf;
5021 	struct sctp_ifalist *hash_head;
5022 	uint32_t hash_of_addr;
5023 
5024 	if (holds_lock == 0)
5025 		SCTP_IPI_ADDR_RLOCK();
5026 
5027 	vrf = sctp_find_vrf(vrf_id);
5028 	if (vrf == NULL) {
5029 		if (holds_lock == 0)
5030 			SCTP_IPI_ADDR_RUNLOCK();
5031 		return (NULL);
5032 	}
5033 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5034 
5035 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5036 	if (hash_head == NULL) {
5037 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5038 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5039 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5040 		sctp_print_address(addr);
5041 		SCTP_PRINTF("No such bucket for address\n");
5042 		if (holds_lock == 0)
5043 			SCTP_IPI_ADDR_RUNLOCK();
5044 
5045 		return (NULL);
5046 	}
5047 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5048 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5049 			continue;
5050 #ifdef INET
5051 		if (addr->sa_family == AF_INET) {
5052 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5053 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5054 				/* found him. */
5055 				if (holds_lock == 0)
5056 					SCTP_IPI_ADDR_RUNLOCK();
5057 				return (sctp_ifap);
5058 				break;
5059 			}
5060 		}
5061 #endif
5062 #ifdef INET6
5063 		if (addr->sa_family == AF_INET6) {
5064 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5065 			    &sctp_ifap->address.sin6)) {
5066 				/* found him. */
5067 				if (holds_lock == 0)
5068 					SCTP_IPI_ADDR_RUNLOCK();
5069 				return (sctp_ifap);
5070 				break;
5071 			}
5072 		}
5073 #endif
5074 	}
5075 	if (holds_lock == 0)
5076 		SCTP_IPI_ADDR_RUNLOCK();
5077 	return (NULL);
5078 }
5079 
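/*
 * Called as the user consumes data from the receive buffer.  If the
 * receive window has grown by at least rwnd_req since the last report, a
 * window-update SACK is sent and the output path is kicked; otherwise the
 * freed byte count is simply accumulated for later.
 */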
5080 static void
5081 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
5082     uint32_t rwnd_req)
5083 {
5084 	/* User pulled some data, do we need a rwnd update? */
5085 	int r_unlocked = 0;
5086 	uint32_t dif, rwnd;
5087 	struct socket *so = NULL;
5088 
5089 	if (stcb == NULL)
5090 		return;
5091 
5092 	atomic_add_int(&stcb->asoc.refcnt, 1);
5093 
5094 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5095 	    SCTP_STATE_SHUTDOWN_RECEIVED |
5096 	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5097 		/* Pre-check: if we are freeing, no update needed */
5098 		goto no_lock;
5099 	}
5100 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5101 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5102 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5103 		goto out;
5104 	}
5105 	so = stcb->sctp_socket;
5106 	if (so == NULL) {
5107 		goto out;
5108 	}
5109 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5110 	/* Have you freed enough to look? */
5111 	*freed_so_far = 0;
5112 	/* Yep, it's worth a look and the lock overhead */
5113 
5114 	/* Figure out what the rwnd would be */
5115 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5116 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5117 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5118 	} else {
5119 		dif = 0;
5120 	}
5121 	if (dif >= rwnd_req) {
5122 		if (hold_rlock) {
5123 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5124 			r_unlocked = 1;
5125 		}
5126 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5127 			/*
5128 			 * One last check before we possibly let the guy in.
5129 			 * There is a race where the guy has not yet reached
5130 			 * the gate; in that case, just bail out.
5131 			 */
5132 			goto out;
5133 		}
5134 		SCTP_TCB_LOCK(stcb);
5135 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5136 			/* No reports here */
5137 			SCTP_TCB_UNLOCK(stcb);
5138 			goto out;
5139 		}
5140 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5141 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5142 
5143 		sctp_chunk_output(stcb->sctp_ep, stcb,
5144 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5145 		/* make sure no timer is running */
5146 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5147 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5148 		SCTP_TCB_UNLOCK(stcb);
5149 	} else {
5150 		/* Update how much we have pending */
5151 		stcb->freed_by_sorcv_sincelast = dif;
5152 	}
5153 out:
5154 	if (so && r_unlocked && hold_rlock) {
5155 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5156 	}
5157 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5158 no_lock:
5159 	atomic_add_int(&stcb->asoc.refcnt, -1);
5160 	return;
5161 }
5162 
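/*
 * SCTP's receive worker: hands queued data and notifications from the
 * endpoint's read queue to the user via uio (or an mbuf chain when mp is
 * supplied), honouring the MSG_* flags described below.
 */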
5163 int
5164 sctp_sorecvmsg(struct socket *so,
5165     struct uio *uio,
5166     struct mbuf **mp,
5167     struct sockaddr *from,
5168     int fromlen,
5169     int *msg_flags,
5170     struct sctp_sndrcvinfo *sinfo,
5171     int filling_sinfo)
5172 {
5173 	/*
5174 	 * MSG flags we will look at: MSG_DONTWAIT - non-blocking IO;
5175 	 * MSG_PEEK - Look don't touch :-D (only valid with an OUT mbuf copy,
5176 	 * i.e. mp == NULL, thus uio is the copy method to userland); MSG_WAITALL - ??
5177 	 * On the way out we may send out any combination of
5178 	 * MSG_NOTIFICATION and MSG_EOR.
5179 	 *
5180 	 */
5181 	struct sctp_inpcb *inp = NULL;
5182 	int my_len = 0;
5183 	int cp_len = 0, error = 0;
5184 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5185 	struct mbuf *m = NULL;
5186 	struct sctp_tcb *stcb = NULL;
5187 	int wakeup_read_socket = 0;
5188 	int freecnt_applied = 0;
5189 	int out_flags = 0, in_flags = 0;
5190 	int block_allowed = 1;
5191 	uint32_t freed_so_far = 0;
5192 	uint32_t copied_so_far = 0;
5193 	int in_eeor_mode = 0;
5194 	int no_rcv_needed = 0;
5195 	uint32_t rwnd_req = 0;
5196 	int hold_sblock = 0;
5197 	int hold_rlock = 0;
5198 	ssize_t slen = 0;
5199 	uint32_t held_length = 0;
5200 	int sockbuf_lock = 0;
5201 
5202 	if (uio == NULL) {
5203 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5204 		return (EINVAL);
5205 	}
5206 	if (msg_flags) {
5207 		in_flags = *msg_flags;
5208 		if (in_flags & MSG_PEEK)
5209 			SCTP_STAT_INCR(sctps_read_peeks);
5210 	} else {
5211 		in_flags = 0;
5212 	}
5213 	slen = uio->uio_resid;
5214 
5215 	/* Pull in and set up our int flags */
5216 	if (in_flags & MSG_OOB) {
5217 		/* Out-of-band data is NOT supported */
5218 		return (EOPNOTSUPP);
5219 	}
5220 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5221 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5222 		return (EINVAL);
5223 	}
5224 	if ((in_flags & (MSG_DONTWAIT
5225 	    | MSG_NBIO
5226 	    )) ||
5227 	    SCTP_SO_IS_NBIO(so)) {
5228 		block_allowed = 0;
5229 	}
5230 	/* setup the endpoint */
5231 	inp = (struct sctp_inpcb *)so->so_pcb;
5232 	if (inp == NULL) {
5233 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5234 		return (EFAULT);
5235 	}
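	/*
	 * rwnd_req is the number of bytes that must be freed from the
	 * receive buffer before it is worth reporting the newly opened
	 * window to the peer (see sctp_user_rcvd() above).
	 */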
5236 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5237 	/* Must be at least an MTU's worth */
5238 	if (rwnd_req < SCTP_MIN_RWND)
5239 		rwnd_req = SCTP_MIN_RWND;
5240 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5241 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5242 		sctp_misc_ints(SCTP_SORECV_ENTER,
5243 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5244 	}
5245 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5246 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5247 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5248 	}
5249 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5250 	if (error) {
5251 		goto release_unlocked;
5252 	}
5253 	sockbuf_lock = 1;
5254 restart:
5255 
5256 
5257 restart_nosblocks:
5258 	if (hold_sblock == 0) {
5259 		SOCKBUF_LOCK(&so->so_rcv);
5260 		hold_sblock = 1;
5261 	}
5262 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5263 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5264 		goto out;
5265 	}
5266 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5267 		if (so->so_error) {
5268 			error = so->so_error;
5269 			if ((in_flags & MSG_PEEK) == 0)
5270 				so->so_error = 0;
5271 			goto out;
5272 		} else {
5273 			if (so->so_rcv.sb_cc == 0) {
5274 				/* indicate EOF */
5275 				error = 0;
5276 				goto out;
5277 			}
5278 		}
5279 	}
5280 	if (so->so_rcv.sb_cc <= held_length) {
5281 		if (so->so_error) {
5282 			error = so->so_error;
5283 			if ((in_flags & MSG_PEEK) == 0) {
5284 				so->so_error = 0;
5285 			}
5286 			goto out;
5287 		}
5288 		if ((so->so_rcv.sb_cc == 0) &&
5289 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5290 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5291 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5292 				/*
5293 				 * For the active open side, clear the flags
5294 				 * for re-use; the passive open side is
5295 				 * blocked by connect.
5296 				 */
5297 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5298 					/*
5299 					 * You were aborted, passive side
5300 					 * always hits here
5301 					 */
5302 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5303 					error = ECONNRESET;
5304 				}
5305 				so->so_state &= ~(SS_ISCONNECTING |
5306 				    SS_ISDISCONNECTING |
5307 				    SS_ISCONFIRMING |
5308 				    SS_ISCONNECTED);
5309 				if (error == 0) {
5310 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5311 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5312 						error = ENOTCONN;
5313 					}
5314 				}
5315 				goto out;
5316 			}
5317 		}
5318 		if (block_allowed) {
5319 			error = sbwait(&so->so_rcv);
5320 			if (error) {
5321 				goto out;
5322 			}
5323 			held_length = 0;
5324 			goto restart_nosblocks;
5325 		} else {
5326 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5327 			error = EWOULDBLOCK;
5328 			goto out;
5329 		}
5330 	}
5331 	if (hold_sblock == 1) {
5332 		SOCKBUF_UNLOCK(&so->so_rcv);
5333 		hold_sblock = 0;
5334 	}
5335 	/* we possibly have data we can read */
5336 	/* sa_ignore FREED_MEMORY */
5337 	control = TAILQ_FIRST(&inp->read_queue);
5338 	if (control == NULL) {
5339 		/*
5340 		 * This could be happening since the appender did the
5341 		 * increment but has not yet done the tailq insert onto the
5342 		 * read_queue.
5343 		 */
5344 		if (hold_rlock == 0) {
5345 			SCTP_INP_READ_LOCK(inp);
5346 		}
5347 		control = TAILQ_FIRST(&inp->read_queue);
5348 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5349 #ifdef INVARIANTS
5350 			panic("Huh, its non zero and nothing on control?");
5351 #endif
5352 			so->so_rcv.sb_cc = 0;
5353 		}
5354 		SCTP_INP_READ_UNLOCK(inp);
5355 		hold_rlock = 0;
5356 		goto restart;
5357 	}
5358 	if ((control->length == 0) &&
5359 	    (control->do_not_ref_stcb)) {
5360 		/*
5361 		 * Clean-up code for freeing an assoc that left behind a
5362 		 * pdapi... maybe a peer in EEOR mode that just closed after
5363 		 * sending and never indicated an EOR.
5364 		 */
5365 		if (hold_rlock == 0) {
5366 			hold_rlock = 1;
5367 			SCTP_INP_READ_LOCK(inp);
5368 		}
5369 		control->held_length = 0;
5370 		if (control->data) {
5371 			/* Hmm, there is data here... fix up the length and tail */
5372 			struct mbuf *m_tmp;
5373 			int cnt = 0;
5374 
5375 			m_tmp = control->data;
5376 			while (m_tmp) {
5377 				cnt += SCTP_BUF_LEN(m_tmp);
5378 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5379 					control->tail_mbuf = m_tmp;
5380 					control->end_added = 1;
5381 				}
5382 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5383 			}
5384 			control->length = cnt;
5385 		} else {
5386 			/* remove it */
5387 			TAILQ_REMOVE(&inp->read_queue, control, next);
5388 			/* Add back any hidden data */
5389 			sctp_free_remote_addr(control->whoFrom);
5390 			sctp_free_a_readq(stcb, control);
5391 		}
5392 		if (hold_rlock) {
5393 			hold_rlock = 0;
5394 			SCTP_INP_READ_UNLOCK(inp);
5395 		}
5396 		goto restart;
5397 	}
5398 	if ((control->length == 0) &&
5399 	    (control->end_added == 1)) {
5400 		/*
5401 		 * Do we also need to check for (control->pdapi_aborted ==
5402 		 * 1)?
5403 		 */
5404 		if (hold_rlock == 0) {
5405 			hold_rlock = 1;
5406 			SCTP_INP_READ_LOCK(inp);
5407 		}
5408 		TAILQ_REMOVE(&inp->read_queue, control, next);
5409 		if (control->data) {
5410 #ifdef INVARIANTS
5411 			panic("control->data not null but control->length == 0");
5412 #else
5413 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5414 			sctp_m_freem(control->data);
5415 			control->data = NULL;
5416 #endif
5417 		}
5418 		if (control->aux_data) {
5419 			sctp_m_free(control->aux_data);
5420 			control->aux_data = NULL;
5421 		}
5422 #ifdef INVARIANTS
5423 		if (control->on_strm_q) {
5424 			panic("About to free ctl:%p so:%p and its in %d",
5425 			    control, so, control->on_strm_q);
5426 		}
5427 #endif
5428 		sctp_free_remote_addr(control->whoFrom);
5429 		sctp_free_a_readq(stcb, control);
5430 		if (hold_rlock) {
5431 			hold_rlock = 0;
5432 			SCTP_INP_READ_UNLOCK(inp);
5433 		}
5434 		goto restart;
5435 	}
5436 	if (control->length == 0) {
5437 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5438 		    (filling_sinfo)) {
5439 			/* find a more suitable one than this */
5440 			ctl = TAILQ_NEXT(control, next);
5441 			while (ctl) {
5442 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5443 				    (ctl->some_taken ||
5444 				    (ctl->spec_flags & M_NOTIFICATION) ||
5445 				    ((ctl->do_not_ref_stcb == 0) &&
5446 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5447 				    ) {
5448 					/*-
5449 					 * If we have a different TCB next, and there is data
5450 					 * present, and either we have already taken some (pdapi),
5451 					 * OR we can ref the tcb and no delivery has started on
5452 					 * this stream, we take it. Note we allow a notification
5453 					 * on a different assoc to be delivered.
5454 					 */
5455 					control = ctl;
5456 					goto found_one;
5457 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5458 					    (ctl->length) &&
5459 					    ((ctl->some_taken) ||
5460 					    ((ctl->do_not_ref_stcb == 0) &&
5461 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5462 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5463 					/*-
5464 					 * If we have the same tcb, and there is data present, and
5465 					 * we have the stream interleave feature on, then if we have
5466 					 * taken some (pdapi) or we can refer to that tcb AND we have
5467 					 * not started a delivery for this stream, we can take it.
5468 					 * Note we do NOT allow a notification on the same assoc to
5469 					 * be delivered.
5470 					 */
5471 					control = ctl;
5472 					goto found_one;
5473 				}
5474 				ctl = TAILQ_NEXT(ctl, next);
5475 			}
5476 		}
5477 		/*
5478 		 * If we reach here, no suitable replacement is available
5479 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5480 		 * into our held count, and it's time to sleep again.
5481 		 */
5482 		held_length = so->so_rcv.sb_cc;
5483 		control->held_length = so->so_rcv.sb_cc;
5484 		goto restart;
5485 	}
5486 	/* Clear the held length since there is something to read */
5487 	control->held_length = 0;
5488 found_one:
5489 	/*
5490 	 * If we reach here, control has some data for us to read off.
5491 	 * Note that stcb COULD be NULL.
5492 	 */
5493 	if (hold_rlock == 0) {
5494 		hold_rlock = 1;
5495 		SCTP_INP_READ_LOCK(inp);
5496 	}
5497 	control->some_taken++;
5498 	stcb = control->stcb;
5499 	if (stcb) {
5500 		if ((control->do_not_ref_stcb == 0) &&
5501 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5502 			if (freecnt_applied == 0)
5503 				stcb = NULL;
5504 		} else if (control->do_not_ref_stcb == 0) {
5505 			/* you can't free it on me please */
5506 			/*
5507 			 * The lock on the socket buffer protects us so the
5508 			 * free code will stop. But since we used the
5509 			 * socketbuf lock and the sender uses the tcb_lock
5510 			 * to increment, we need to use the atomic add to
5511 			 * the refcnt
5512 			 */
5513 			if (freecnt_applied) {
5514 #ifdef INVARIANTS
5515 				panic("refcnt already incremented");
5516 #else
5517 				SCTP_PRINTF("refcnt already incremented?\n");
5518 #endif
5519 			} else {
5520 				atomic_add_int(&stcb->asoc.refcnt, 1);
5521 				freecnt_applied = 1;
5522 			}
5523 			/*
5524 			 * Setup to remember how much we have not yet told
5525 			 * the peer our rwnd has opened up. Note we grab the
5526 			 * value from the tcb from last time. Note too that
5527 			 * sack sending clears this when a sack is sent,
5528 			 * which is fine. Once we hit the rwnd_req, we then
5529 			 * will go to the sctp_user_rcvd() that will not
5530 			 * lock until it KNOWs it MUST send a WUP-SACK.
5531 			 */
5532 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5533 			stcb->freed_by_sorcv_sincelast = 0;
5534 		}
5535 	}
5536 	if (stcb &&
5537 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5538 	    control->do_not_ref_stcb == 0) {
5539 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5540 	}
5541 	/* First let's copy off the sinfo and sockaddr info */
5542 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5543 		sinfo->sinfo_stream = control->sinfo_stream;
5544 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5545 		sinfo->sinfo_flags = control->sinfo_flags;
5546 		sinfo->sinfo_ppid = control->sinfo_ppid;
5547 		sinfo->sinfo_context = control->sinfo_context;
5548 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5549 		sinfo->sinfo_tsn = control->sinfo_tsn;
5550 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5551 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5552 		nxt = TAILQ_NEXT(control, next);
5553 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5554 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5555 			struct sctp_extrcvinfo *s_extra;
5556 
5557 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5558 			if ((nxt) &&
5559 			    (nxt->length)) {
5560 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5561 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5562 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5563 				}
5564 				if (nxt->spec_flags & M_NOTIFICATION) {
5565 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5566 				}
5567 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5568 				s_extra->serinfo_next_length = nxt->length;
5569 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5570 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5571 				if (nxt->tail_mbuf != NULL) {
5572 					if (nxt->end_added) {
5573 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5574 					}
5575 				}
5576 			} else {
5577 				/*
5578 				 * we explicitly zero this, since the memcpy
5579 				 * may have picked up things beyond the older
5580 				 * sinfo_ fields that are on the control
5581 				 * structure
5582 				 */
5583 				nxt = NULL;
5584 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5585 				s_extra->serinfo_next_aid = 0;
5586 				s_extra->serinfo_next_length = 0;
5587 				s_extra->serinfo_next_ppid = 0;
5588 				s_extra->serinfo_next_stream = 0;
5589 			}
5590 		}
5591 		/*
5592 		 * update from the real current cum-ack, if we have an stcb.
5593 		 */
5594 		if ((control->do_not_ref_stcb == 0) && stcb)
5595 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5596 		/*
5597 		 * mask off the high bits, we keep the actual chunk bits in
5598 		 * there.
5599 		 */
5600 		sinfo->sinfo_flags &= 0x00ff;
5601 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5602 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5603 		}
5604 	}
5605 #ifdef SCTP_ASOCLOG_OF_TSNS
5606 	{
5607 		int index, newindex;
5608 		struct sctp_pcbtsn_rlog *entry;
5609 
5610 		do {
5611 			index = inp->readlog_index;
5612 			newindex = index + 1;
5613 			if (newindex >= SCTP_READ_LOG_SIZE) {
5614 				newindex = 0;
5615 			}
5616 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5617 		entry = &inp->readlog[index];
5618 		entry->vtag = control->sinfo_assoc_id;
5619 		entry->strm = control->sinfo_stream;
5620 		entry->seq = (uint16_t)control->mid;
5621 		entry->sz = control->length;
5622 		entry->flgs = control->sinfo_flags;
5623 	}
5624 #endif
5625 	if ((fromlen > 0) && (from != NULL)) {
5626 		union sctp_sockstore store;
5627 		size_t len;
5628 
5629 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5630 #ifdef INET6
5631 		case AF_INET6:
5632 			len = sizeof(struct sockaddr_in6);
5633 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5634 			store.sin6.sin6_port = control->port_from;
5635 			break;
5636 #endif
5637 #ifdef INET
5638 		case AF_INET:
5639 #ifdef INET6
5640 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5641 				len = sizeof(struct sockaddr_in6);
5642 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5643 				    &store.sin6);
5644 				store.sin6.sin6_port = control->port_from;
5645 			} else {
5646 				len = sizeof(struct sockaddr_in);
5647 				store.sin = control->whoFrom->ro._l_addr.sin;
5648 				store.sin.sin_port = control->port_from;
5649 			}
5650 #else
5651 			len = sizeof(struct sockaddr_in);
5652 			store.sin = control->whoFrom->ro._l_addr.sin;
5653 			store.sin.sin_port = control->port_from;
5654 #endif
5655 			break;
5656 #endif
5657 		default:
5658 			len = 0;
5659 			break;
5660 		}
5661 		memcpy(from, &store, min((size_t)fromlen, len));
5662 #ifdef INET6
5663 		{
5664 			struct sockaddr_in6 lsa6, *from6;
5665 
5666 			from6 = (struct sockaddr_in6 *)from;
5667 			sctp_recover_scope_mac(from6, (&lsa6));
5668 		}
5669 #endif
5670 	}
5671 	if (hold_rlock) {
5672 		SCTP_INP_READ_UNLOCK(inp);
5673 		hold_rlock = 0;
5674 	}
5675 	if (hold_sblock) {
5676 		SOCKBUF_UNLOCK(&so->so_rcv);
5677 		hold_sblock = 0;
5678 	}
5679 	/* now copy out what data we can */
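	/*
	 * Two copy-out styles: with mp == NULL the data is copied to
	 * userland via uiomove(); otherwise the raw mbuf chain is handed
	 * back to the caller in *mp and only accounted for here.
	 */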
5680 	if (mp == NULL) {
5681 		/* copy out each mbuf in the chain up to length */
5682 get_more_data:
5683 		m = control->data;
5684 		while (m) {
5685 			/* Move out all we can */
5686 			cp_len = (int)uio->uio_resid;
5687 			my_len = (int)SCTP_BUF_LEN(m);
5688 			if (cp_len > my_len) {
5689 				/* not enough in this buf */
5690 				cp_len = my_len;
5691 			}
5692 			if (hold_rlock) {
5693 				SCTP_INP_READ_UNLOCK(inp);
5694 				hold_rlock = 0;
5695 			}
5696 			if (cp_len > 0)
5697 				error = uiomove(mtod(m, char *), cp_len, uio);
5698 			/* re-read */
5699 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5700 				goto release;
5701 			}
5702 			if ((control->do_not_ref_stcb == 0) && stcb &&
5703 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5704 				no_rcv_needed = 1;
5705 			}
5706 			if (error) {
5707 				/* error we are out of here */
5708 				goto release;
5709 			}
5710 			SCTP_INP_READ_LOCK(inp);
5711 			hold_rlock = 1;
5712 			if (cp_len == SCTP_BUF_LEN(m)) {
5713 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5714 				    (control->end_added)) {
5715 					out_flags |= MSG_EOR;
5716 					if ((control->do_not_ref_stcb == 0) &&
5717 					    (control->stcb != NULL) &&
5718 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5719 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5720 				}
5721 				if (control->spec_flags & M_NOTIFICATION) {
5722 					out_flags |= MSG_NOTIFICATION;
5723 				}
5724 				/* we ate up the mbuf */
5725 				if (in_flags & MSG_PEEK) {
5726 					/* just looking */
5727 					m = SCTP_BUF_NEXT(m);
5728 					copied_so_far += cp_len;
5729 				} else {
5730 					/* dispose of the mbuf */
5731 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5732 						sctp_sblog(&so->so_rcv,
5733 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5734 					}
5735 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5736 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5737 						sctp_sblog(&so->so_rcv,
5738 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5739 					}
5740 					copied_so_far += cp_len;
5741 					freed_so_far += cp_len;
5742 					freed_so_far += MSIZE;
5743 					atomic_subtract_int(&control->length, cp_len);
5744 					control->data = sctp_m_free(m);
5745 					m = control->data;
5746 					/*
5747 					 * been through it all; we must hold the
5748 					 * sb lock, so it is ok to null the tail
5749 					 */
5750 					if (control->data == NULL) {
5751 #ifdef INVARIANTS
5752 						if ((control->end_added == 0) ||
5753 						    (TAILQ_NEXT(control, next) == NULL)) {
5754 							/*
5755 							 * If the end is not
5756 							 * added, OR the
5757 							 * next is NOT null
5758 							 * we MUST have the
5759 							 * lock.
5760 							 */
5761 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5762 								panic("Hmm we don't own the lock?");
5763 							}
5764 						}
5765 #endif
5766 						control->tail_mbuf = NULL;
5767 #ifdef INVARIANTS
5768 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5769 							panic("end_added, nothing left and no MSG_EOR");
5770 						}
5771 #endif
5772 					}
5773 				}
5774 			} else {
5775 				/* Do we need to trim the mbuf? */
5776 				if (control->spec_flags & M_NOTIFICATION) {
5777 					out_flags |= MSG_NOTIFICATION;
5778 				}
5779 				if ((in_flags & MSG_PEEK) == 0) {
5780 					SCTP_BUF_RESV_UF(m, cp_len);
5781 					SCTP_BUF_LEN(m) -= cp_len;
5782 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5783 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5784 					}
5785 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5786 					if ((control->do_not_ref_stcb == 0) &&
5787 					    stcb) {
5788 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5789 					}
5790 					copied_so_far += cp_len;
5791 					freed_so_far += cp_len;
5792 					freed_so_far += MSIZE;
5793 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5794 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5795 						    SCTP_LOG_SBRESULT, 0);
5796 					}
5797 					atomic_subtract_int(&control->length, cp_len);
5798 				} else {
5799 					copied_so_far += cp_len;
5800 				}
5801 			}
5802 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5803 				break;
5804 			}
5805 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5806 			    (control->do_not_ref_stcb == 0) &&
5807 			    (freed_so_far >= rwnd_req)) {
5808 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5809 			}
5810 		}		/* end while(m) */
5811 		/*
5812 		 * At this point we have looked at it all and we either have
5813 		 * a MSG_EOR, or have read all the user wants... <OR>
5814 		 * control->length == 0.
5815 		 */
5816 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5817 			/* we are done with this control */
5818 			if (control->length == 0) {
5819 				if (control->data) {
5820 #ifdef INVARIANTS
5821 					panic("control->data not null at read eor?");
5822 #else
5823 					SCTP_PRINTF("Strange, data left in the control buffer... invariants would panic?\n");
5824 					sctp_m_freem(control->data);
5825 					control->data = NULL;
5826 #endif
5827 				}
5828 		done_with_control:
5829 				if (hold_rlock == 0) {
5830 					SCTP_INP_READ_LOCK(inp);
5831 					hold_rlock = 1;
5832 				}
5833 				TAILQ_REMOVE(&inp->read_queue, control, next);
5834 				/* Add back any hidden data */
5835 				if (control->held_length) {
5836 					held_length = 0;
5837 					control->held_length = 0;
5838 					wakeup_read_socket = 1;
5839 				}
5840 				if (control->aux_data) {
5841 					sctp_m_free(control->aux_data);
5842 					control->aux_data = NULL;
5843 				}
5844 				no_rcv_needed = control->do_not_ref_stcb;
5845 				sctp_free_remote_addr(control->whoFrom);
5846 				control->data = NULL;
5847 #ifdef INVARIANTS
5848 				if (control->on_strm_q) {
5849 					panic("About to free ctl:%p so:%p and its in %d",
5850 					    control, so, control->on_strm_q);
5851 				}
5852 #endif
5853 				sctp_free_a_readq(stcb, control);
5854 				control = NULL;
5855 				if ((freed_so_far >= rwnd_req) &&
5856 				    (no_rcv_needed == 0))
5857 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5858 
5859 			} else {
5860 				/*
5861 				 * The user did not read all of this
5862 				 * message, turn off the returned MSG_EOR
5863 				 * since we are leaving more behind on the
5864 				 * control to read.
5865 				 */
5866 #ifdef INVARIANTS
5867 				if (control->end_added &&
5868 				    (control->data == NULL) &&
5869 				    (control->tail_mbuf == NULL)) {
5870 					panic("Gak, control->length is corrupt?");
5871 				}
5872 #endif
5873 				no_rcv_needed = control->do_not_ref_stcb;
5874 				out_flags &= ~MSG_EOR;
5875 			}
5876 		}
5877 		if (out_flags & MSG_EOR) {
5878 			goto release;
5879 		}
5880 		if ((uio->uio_resid == 0) ||
5881 		    ((in_eeor_mode) &&
5882 		    (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
5883 			goto release;
5884 		}
5885 		/*
5886 		 * If I hit here, the receiver wants more and this message is
5887 		 * NOT done (pd-api). So two questions: Can we block? If not,
5888 		 * we are done. Did the user NOT set MSG_WAITALL?
5889 		 */
5890 		if (block_allowed == 0) {
5891 			goto release;
5892 		}
5893 		/*
5894 		 * We need to wait for more data. A few things:
5895 		 * - We don't sbunlock(), so we don't get someone else reading.
5896 		 * - We must be sure to account for the case where what is
5897 		 *   added is NOT to our control when we wake up.
5898 		 */
5899 
5900 		/*
5901 		 * Do we need to tell the transport a rwnd update might be
5902 		 * needed before we go to sleep?
5903 		 */
5904 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5905 		    ((freed_so_far >= rwnd_req) &&
5906 		    (control->do_not_ref_stcb == 0) &&
5907 		    (no_rcv_needed == 0))) {
5908 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5909 		}
5910 wait_some_more:
5911 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5912 			goto release;
5913 		}
5914 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5915 			goto release;
5916 
5917 		if (hold_rlock == 1) {
5918 			SCTP_INP_READ_UNLOCK(inp);
5919 			hold_rlock = 0;
5920 		}
5921 		if (hold_sblock == 0) {
5922 			SOCKBUF_LOCK(&so->so_rcv);
5923 			hold_sblock = 1;
5924 		}
5925 		if ((copied_so_far) && (control->length == 0) &&
5926 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5927 			goto release;
5928 		}
5929 		if (so->so_rcv.sb_cc <= control->held_length) {
5930 			error = sbwait(&so->so_rcv);
5931 			if (error) {
5932 				goto release;
5933 			}
5934 			control->held_length = 0;
5935 		}
5936 		if (hold_sblock) {
5937 			SOCKBUF_UNLOCK(&so->so_rcv);
5938 			hold_sblock = 0;
5939 		}
5940 		if (control->length == 0) {
5941 			/* still nothing here */
5942 			if (control->end_added == 1) {
5943 				/* the peer aborted, or is done, i.e. did a shutdown */
5944 				out_flags |= MSG_EOR;
5945 				if (control->pdapi_aborted) {
5946 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5947 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5948 
5949 					out_flags |= MSG_TRUNC;
5950 				} else {
5951 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5952 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5953 				}
5954 				goto done_with_control;
5955 			}
5956 			if (so->so_rcv.sb_cc > held_length) {
5957 				control->held_length = so->so_rcv.sb_cc;
5958 				held_length = 0;
5959 			}
5960 			goto wait_some_more;
5961 		} else if (control->data == NULL) {
5962 			/*
5963 			 * we must re-sync since data is probably being
5964 			 * added
5965 			 */
5966 			SCTP_INP_READ_LOCK(inp);
5967 			if ((control->length > 0) && (control->data == NULL)) {
5968 				/*
5969 				 * big trouble... we have the lock and it's
5970 				 * corrupt?
5971 				 */
5972 #ifdef INVARIANTS
5973 				panic("Impossible data==NULL length !=0");
5974 #endif
5975 				out_flags |= MSG_EOR;
5976 				out_flags |= MSG_TRUNC;
5977 				control->length = 0;
5978 				SCTP_INP_READ_UNLOCK(inp);
5979 				goto done_with_control;
5980 			}
5981 			SCTP_INP_READ_UNLOCK(inp);
5982 			/* We will fall around to get more data */
5983 		}
5984 		goto get_more_data;
5985 	} else {
5986 		/*-
5987 		 * Give caller back the mbuf chain,
5988 		 * store in uio_resid the length
5989 		 */
5990 		wakeup_read_socket = 0;
5991 		if ((control->end_added == 0) ||
5992 		    (TAILQ_NEXT(control, next) == NULL)) {
5993 			/* Need to get rlock */
5994 			if (hold_rlock == 0) {
5995 				SCTP_INP_READ_LOCK(inp);
5996 				hold_rlock = 1;
5997 			}
5998 		}
5999 		if (control->end_added) {
6000 			out_flags |= MSG_EOR;
6001 			if ((control->do_not_ref_stcb == 0) &&
6002 			    (control->stcb != NULL) &&
6003 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6004 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6005 		}
6006 		if (control->spec_flags & M_NOTIFICATION) {
6007 			out_flags |= MSG_NOTIFICATION;
6008 		}
6009 		uio->uio_resid = control->length;
6010 		*mp = control->data;
6011 		m = control->data;
6012 		while (m) {
6013 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6014 				sctp_sblog(&so->so_rcv,
6015 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6016 			}
6017 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6018 			freed_so_far += SCTP_BUF_LEN(m);
6019 			freed_so_far += MSIZE;
6020 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6021 				sctp_sblog(&so->so_rcv,
6022 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6023 			}
6024 			m = SCTP_BUF_NEXT(m);
6025 		}
6026 		control->data = control->tail_mbuf = NULL;
6027 		control->length = 0;
6028 		if (out_flags & MSG_EOR) {
6029 			/* Done with this control */
6030 			goto done_with_control;
6031 		}
6032 	}
6033 release:
6034 	if (hold_rlock == 1) {
6035 		SCTP_INP_READ_UNLOCK(inp);
6036 		hold_rlock = 0;
6037 	}
6038 	if (hold_sblock == 1) {
6039 		SOCKBUF_UNLOCK(&so->so_rcv);
6040 		hold_sblock = 0;
6041 	}
6042 	sbunlock(&so->so_rcv);
6043 	sockbuf_lock = 0;
6044 
6045 release_unlocked:
6046 	if (hold_sblock) {
6047 		SOCKBUF_UNLOCK(&so->so_rcv);
6048 		hold_sblock = 0;
6049 	}
6050 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6051 		if ((freed_so_far >= rwnd_req) &&
6052 		    (control && (control->do_not_ref_stcb == 0)) &&
6053 		    (no_rcv_needed == 0))
6054 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6055 	}
6056 out:
6057 	if (msg_flags) {
6058 		*msg_flags = out_flags;
6059 	}
6060 	if (((out_flags & MSG_EOR) == 0) &&
6061 	    ((in_flags & MSG_PEEK) == 0) &&
6062 	    (sinfo) &&
6063 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6064 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6065 		struct sctp_extrcvinfo *s_extra;
6066 
6067 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6068 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6069 	}
6070 	if (hold_rlock == 1) {
6071 		SCTP_INP_READ_UNLOCK(inp);
6072 	}
6073 	if (hold_sblock) {
6074 		SOCKBUF_UNLOCK(&so->so_rcv);
6075 	}
6076 	if (sockbuf_lock) {
6077 		sbunlock(&so->so_rcv);
6078 	}
6079 	if (freecnt_applied) {
6080 		/*
6081 		 * The lock on the socket buffer protects us so the free
6082 		 * code will stop. But since we used the socketbuf lock and
6083 		 * the sender uses the tcb_lock to increment, we need to use
6084 		 * the atomic add to the refcnt.
6085 		 */
6086 		if (stcb == NULL) {
6087 #ifdef INVARIANTS
6088 			panic("stcb for refcnt has gone NULL?");
6089 			goto stage_left;
6090 #else
6091 			goto stage_left;
6092 #endif
6093 		}
6094 		/* Save the value back for next time */
6095 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6096 		atomic_add_int(&stcb->asoc.refcnt, -1);
6097 	}
6098 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6099 		if (stcb) {
6100 			sctp_misc_ints(SCTP_SORECV_DONE,
6101 			    freed_so_far,
6102 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6103 			    stcb->asoc.my_rwnd,
6104 			    so->so_rcv.sb_cc);
6105 		} else {
6106 			sctp_misc_ints(SCTP_SORECV_DONE,
6107 			    freed_so_far,
6108 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6109 			    0,
6110 			    so->so_rcv.sb_cc);
6111 		}
6112 	}
6113 stage_left:
6114 	if (wakeup_read_socket) {
6115 		sctp_sorwakeup(inp, so);
6116 	}
6117 	return (error);
6118 }
6119 
6120 
6121 #ifdef SCTP_MBUF_LOGGING
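/*
 * Wrappers around m_free()/m_freem() that log each mbuf freed when
 * SCTP mbuf logging is enabled.
 */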
6122 struct mbuf *
6123 sctp_m_free(struct mbuf *m)
6124 {
6125 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6126 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6127 	}
6128 	return (m_free(m));
6129 }
6130 
6131 void
6132 sctp_m_freem(struct mbuf *mb)
6133 {
6134 	while (mb != NULL)
6135 		mb = sctp_m_free(mb);
6136 }
6137 
6138 #endif
6139 
6140 int
6141 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6142 {
6143 	/*
6144 	 * Given a local address, for all associations that hold the
6145 	 * address, request a peer-set-primary.
6146 	 */
6147 	struct sctp_ifa *ifa;
6148 	struct sctp_laddr *wi;
6149 
6150 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6151 	if (ifa == NULL) {
6152 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6153 		return (EADDRNOTAVAIL);
6154 	}
6155 	/*
6156 	 * Now that we have the ifa we must awaken the iterator with this
6157 	 * message.
6158 	 */
6159 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6160 	if (wi == NULL) {
6161 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6162 		return (ENOMEM);
6163 	}
6164 	/* Now incr the count and init the wi structure */
6165 	SCTP_INCR_LADDR_COUNT();
6166 	bzero(wi, sizeof(*wi));
6167 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6168 	wi->ifa = ifa;
6169 	wi->action = SCTP_SET_PRIM_ADDR;
6170 	atomic_add_int(&ifa->refcount, 1);
6171 
6172 	/* Now add it to the work queue */
6173 	SCTP_WQ_ADDR_LOCK();
6174 	/*
6175 	 * Should this really be a tailq? As it is we will process the
6176 	 * newest first :-0
6177 	 */
6178 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6179 	SCTP_WQ_ADDR_UNLOCK();
6180 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6181 	    (struct sctp_inpcb *)NULL,
6182 	    (struct sctp_tcb *)NULL,
6183 	    (struct sctp_nets *)NULL);
6184 	return (0);
6185 }
6186 
6187 
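/*
 * Socket-level receive routine for SCTP sockets.  It is a wrapper around
 * sctp_sorecvmsg() that converts the filled-in sinfo into a control
 * message chain (via sctp_build_ctl_nchunk()) and duplicates the peer
 * address for the caller.
 */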
6188 int
6189 sctp_soreceive(struct socket *so,
6190     struct sockaddr **psa,
6191     struct uio *uio,
6192     struct mbuf **mp0,
6193     struct mbuf **controlp,
6194     int *flagsp)
6195 {
6196 	int error, fromlen;
6197 	uint8_t sockbuf[256];
6198 	struct sockaddr *from;
6199 	struct sctp_extrcvinfo sinfo;
6200 	int filling_sinfo = 1;
6201 	struct sctp_inpcb *inp;
6202 
6203 	inp = (struct sctp_inpcb *)so->so_pcb;
6204 	/* pickup the assoc we are reading from */
6205 	if (inp == NULL) {
6206 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6207 		return (EINVAL);
6208 	}
6209 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6210 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6211 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6212 	    (controlp == NULL)) {
6213 		/* user does not want the sndrcv ctl */
6214 		filling_sinfo = 0;
6215 	}
6216 	if (psa) {
6217 		from = (struct sockaddr *)sockbuf;
6218 		fromlen = sizeof(sockbuf);
6219 		from->sa_len = 0;
6220 	} else {
6221 		from = NULL;
6222 		fromlen = 0;
6223 	}
6224 
6225 	if (filling_sinfo) {
6226 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6227 	}
6228 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6229 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6230 	if (controlp != NULL) {
6231 		/* copy back the sinfo in a CMSG format */
6232 		if (filling_sinfo)
6233 			*controlp = sctp_build_ctl_nchunk(inp,
6234 			    (struct sctp_sndrcvinfo *)&sinfo);
6235 		else
6236 			*controlp = NULL;
6237 	}
6238 	if (psa) {
6239 		/* copy back the address info */
6240 		if (from && from->sa_len) {
6241 			*psa = sodupsockaddr(from, M_NOWAIT);
6242 		} else {
6243 			*psa = NULL;
6244 		}
6245 	}
6246 	return (error);
6247 }
6248 
6249 
6250 
6251 
6252 
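/*
 * Helper for the connectx() path: walk the packed array of totaddr
 * sockaddrs and add each one as a remote address of the association.
 * On an invalid address, or a failure to add, the association is freed,
 * *error is set and we stop.  Returns the number of addresses added.
 */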
6253 int
6254 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6255     int totaddr, int *error)
6256 {
6257 	int added = 0;
6258 	int i;
6259 	struct sctp_inpcb *inp;
6260 	struct sockaddr *sa;
6261 	size_t incr = 0;
6262 #ifdef INET
6263 	struct sockaddr_in *sin;
6264 #endif
6265 #ifdef INET6
6266 	struct sockaddr_in6 *sin6;
6267 #endif
6268 
6269 	sa = addr;
6270 	inp = stcb->sctp_ep;
6271 	*error = 0;
6272 	for (i = 0; i < totaddr; i++) {
6273 		switch (sa->sa_family) {
6274 #ifdef INET
6275 		case AF_INET:
6276 			incr = sizeof(struct sockaddr_in);
6277 			sin = (struct sockaddr_in *)sa;
6278 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6279 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6280 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6281 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6282 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6283 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6284 				*error = EINVAL;
6285 				goto out_now;
6286 			}
6287 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6288 			    SCTP_DONOT_SETSCOPE,
6289 			    SCTP_ADDR_IS_CONFIRMED)) {
6290 				/* assoc is gone, no unlock needed */
6291 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6292 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6293 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6294 				*error = ENOBUFS;
6295 				goto out_now;
6296 			}
6297 			added++;
6298 			break;
6299 #endif
6300 #ifdef INET6
6301 		case AF_INET6:
6302 			incr = sizeof(struct sockaddr_in6);
6303 			sin6 = (struct sockaddr_in6 *)sa;
6304 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6305 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6306 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6307 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6308 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6309 				*error = EINVAL;
6310 				goto out_now;
6311 			}
6312 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6313 			    SCTP_DONOT_SETSCOPE,
6314 			    SCTP_ADDR_IS_CONFIRMED)) {
6315 				/* assoc is gone, no unlock needed */
6316 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6317 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6318 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6319 				*error = ENOBUFS;
6320 				goto out_now;
6321 			}
6322 			added++;
6323 			break;
6324 #endif
6325 		default:
6326 			break;
6327 		}
6328 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6329 	}
6330 out_now:
6331 	return (added);
6332 }
6333 
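/*
 * Helper for the connectx() path: validate the packed array of
 * addresses, counting the IPv4 and IPv6 entries, and check whether any
 * of them already belongs to an existing association.  Returns that
 * association if one is found, otherwise NULL.
 */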
6334 struct sctp_tcb *
6335 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6336     unsigned int *totaddr,
6337     unsigned int *num_v4, unsigned int *num_v6, int *error,
6338     unsigned int limit, int *bad_addr)
6339 {
6340 	struct sockaddr *sa;
6341 	struct sctp_tcb *stcb = NULL;
6342 	unsigned int incr, at, i;
6343 
6344 	at = 0;
6345 	sa = addr;
6346 	*error = *num_v6 = *num_v4 = 0;
6347 	/* account and validate addresses */
6348 	for (i = 0; i < *totaddr; i++) {
6349 		switch (sa->sa_family) {
6350 #ifdef INET
6351 		case AF_INET:
6352 			incr = (unsigned int)sizeof(struct sockaddr_in);
6353 			if (sa->sa_len != incr) {
6354 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6355 				*error = EINVAL;
6356 				*bad_addr = 1;
6357 				return (NULL);
6358 			}
6359 			(*num_v4) += 1;
6360 			break;
6361 #endif
6362 #ifdef INET6
6363 		case AF_INET6:
6364 			{
6365 				struct sockaddr_in6 *sin6;
6366 
6367 				sin6 = (struct sockaddr_in6 *)sa;
6368 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6369 					/* Must be non-mapped for connectx */
6370 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6371 					*error = EINVAL;
6372 					*bad_addr = 1;
6373 					return (NULL);
6374 				}
6375 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6376 				if (sa->sa_len != incr) {
6377 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6378 					*error = EINVAL;
6379 					*bad_addr = 1;
6380 					return (NULL);
6381 				}
6382 				(*num_v6) += 1;
6383 				break;
6384 			}
6385 #endif
6386 		default:
6387 			*totaddr = i;
6388 			incr = 0;
6389 			/* we are done */
6390 			break;
6391 		}
6392 		if (i == *totaddr) {
6393 			break;
6394 		}
6395 		SCTP_INP_INCR_REF(inp);
6396 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6397 		if (stcb != NULL) {
6398 			/* Already have, or am bringing up, an association */
6399 			return (stcb);
6400 		} else {
6401 			SCTP_INP_DECR_REF(inp);
6402 		}
6403 		if ((at + incr) > limit) {
6404 			*totaddr = i;
6405 			break;
6406 		}
6407 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6408 	}
6409 	return ((struct sctp_tcb *)NULL);
6410 }
6411 
6412 /*
6413  * sctp_bindx(ADD) for one address.
6414  * assumes all arguments are valid/checked by caller.
6415  */
6416 void
6417 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6418     struct sockaddr *sa, sctp_assoc_t assoc_id,
6419     uint32_t vrf_id, int *error, void *p)
6420 {
6421 	struct sockaddr *addr_touse;
6422 #if defined(INET) && defined(INET6)
6423 	struct sockaddr_in sin;
6424 #endif
6425 
6426 	/* see if we're bound all already! */
6427 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6428 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6429 		*error = EINVAL;
6430 		return;
6431 	}
6432 	addr_touse = sa;
6433 #ifdef INET6
6434 	if (sa->sa_family == AF_INET6) {
6435 #ifdef INET
6436 		struct sockaddr_in6 *sin6;
6437 
6438 #endif
6439 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6440 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6441 			*error = EINVAL;
6442 			return;
6443 		}
6444 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6445 			/* can only bind v6 on PF_INET6 sockets */
6446 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6447 			*error = EINVAL;
6448 			return;
6449 		}
6450 #ifdef INET
6451 		sin6 = (struct sockaddr_in6 *)addr_touse;
6452 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6453 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6454 			    SCTP_IPV6_V6ONLY(inp)) {
6455 				/* can't bind v4-mapped on PF_INET sockets */
6456 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6457 				*error = EINVAL;
6458 				return;
6459 			}
6460 			in6_sin6_2_sin(&sin, sin6);
6461 			addr_touse = (struct sockaddr *)&sin;
6462 		}
6463 #endif
6464 	}
6465 #endif
6466 #ifdef INET
6467 	if (sa->sa_family == AF_INET) {
6468 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6469 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6470 			*error = EINVAL;
6471 			return;
6472 		}
6473 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6474 		    SCTP_IPV6_V6ONLY(inp)) {
6475 			/* can't bind v4 on PF_INET sockets */
6476 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6477 			*error = EINVAL;
6478 			return;
6479 		}
6480 	}
6481 #endif
6482 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6483 		if (p == NULL) {
6484 			/* Can't get proc for Net/Open BSD */
6485 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6486 			*error = EINVAL;
6487 			return;
6488 		}
6489 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6490 		return;
6491 	}
6492 	/*
6493 	 * No locks required here since bind and mgmt_ep_sa all do their own
6494 	 * locking. If we do something for the FIX: below we may need to
6495 	 * lock in that case.
6496 	 */
6497 	if (assoc_id == 0) {
6498 		/* add the address */
6499 		struct sctp_inpcb *lep;
6500 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6501 
6502 		/* validate the incoming port */
6503 		if ((lsin->sin_port != 0) &&
6504 		    (lsin->sin_port != inp->sctp_lport)) {
6505 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6506 			*error = EINVAL;
6507 			return;
6508 		} else {
6509 			/* user specified 0 port, set it to existing port */
6510 			lsin->sin_port = inp->sctp_lport;
6511 		}
6512 
6513 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6514 		if (lep != NULL) {
6515 			/*
6516 			 * We must decrement the refcount since we have the
6517 			 * ep already and are binding. No remove going on
6518 			 * here.
6519 			 */
6520 			SCTP_INP_DECR_REF(lep);
6521 		}
6522 		if (lep == inp) {
6523 			/* already bound to it.. ok */
6524 			return;
6525 		} else if (lep == NULL) {
6526 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6527 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6528 			    SCTP_ADD_IP_ADDRESS,
6529 			    vrf_id, NULL);
6530 		} else {
6531 			*error = EADDRINUSE;
6532 		}
6533 		if (*error)
6534 			return;
6535 	} else {
6536 		/*
6537 		 * FIX: decide whether we allow assoc based bindx
6538 		 */
6539 	}
6540 }
6541 
6542 /*
6543  * sctp_bindx(DELETE) for one address.
6544  * assumes all arguments are valid/checked by caller.
6545  */
6546 void
6547 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6548     struct sockaddr *sa, sctp_assoc_t assoc_id,
6549     uint32_t vrf_id, int *error)
6550 {
6551 	struct sockaddr *addr_touse;
6552 #if defined(INET) && defined(INET6)
6553 	struct sockaddr_in sin;
6554 #endif
6555 
6556 	/* see if we're bound all already! */
6557 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6558 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6559 		*error = EINVAL;
6560 		return;
6561 	}
6562 	addr_touse = sa;
6563 #ifdef INET6
6564 	if (sa->sa_family == AF_INET6) {
6565 #ifdef INET
6566 		struct sockaddr_in6 *sin6;
6567 #endif
6568 
6569 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6570 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6571 			*error = EINVAL;
6572 			return;
6573 		}
6574 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6575 			/* can only bind v6 on PF_INET6 sockets */
6576 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6577 			*error = EINVAL;
6578 			return;
6579 		}
6580 #ifdef INET
6581 		sin6 = (struct sockaddr_in6 *)addr_touse;
6582 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6583 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6584 			    SCTP_IPV6_V6ONLY(inp)) {
6585 				/* can't bind mapped-v4 on PF_INET sockets */
6586 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6587 				*error = EINVAL;
6588 				return;
6589 			}
6590 			in6_sin6_2_sin(&sin, sin6);
6591 			addr_touse = (struct sockaddr *)&sin;
6592 		}
6593 #endif
6594 	}
6595 #endif
6596 #ifdef INET
6597 	if (sa->sa_family == AF_INET) {
6598 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6599 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6600 			*error = EINVAL;
6601 			return;
6602 		}
6603 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6604 		    SCTP_IPV6_V6ONLY(inp)) {
6605 			/* can't bind v4 on PF_INET sockets */
6606 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6607 			*error = EINVAL;
6608 			return;
6609 		}
6610 	}
6611 #endif
6612 	/*
6613 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6614 	 * below is ever changed we may need to lock before calling
6615 	 * association level binding.
6616 	 */
6617 	if (assoc_id == 0) {
6618 		/* delete the address */
6619 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6620 		    SCTP_DEL_IP_ADDRESS,
6621 		    vrf_id, NULL);
6622 	} else {
6623 		/*
6624 		 * FIX: decide whether we allow assoc based bindx
6625 		 */
6626 	}
6627 }
6628 
6629 /*
6630  * returns the valid local address count for an assoc, taking into account
6631  * all scoping rules
6632  */
6633 int
6634 sctp_local_addr_count(struct sctp_tcb *stcb)
6635 {
6636 	int loopback_scope;
6637 #if defined(INET)
6638 	int ipv4_local_scope, ipv4_addr_legal;
6639 #endif
6640 #if defined (INET6)
6641 	int local_scope, site_scope, ipv6_addr_legal;
6642 #endif
6643 	struct sctp_vrf *vrf;
6644 	struct sctp_ifn *sctp_ifn;
6645 	struct sctp_ifa *sctp_ifa;
6646 	int count = 0;
6647 
6648 	/* Turn on all the appropriate scopes */
6649 	loopback_scope = stcb->asoc.scope.loopback_scope;
6650 #if defined(INET)
6651 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6652 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6653 #endif
6654 #if defined(INET6)
6655 	local_scope = stcb->asoc.scope.local_scope;
6656 	site_scope = stcb->asoc.scope.site_scope;
6657 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6658 #endif
6659 	SCTP_IPI_ADDR_RLOCK();
6660 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6661 	if (vrf == NULL) {
6662 		/* no vrf, no addresses */
6663 		SCTP_IPI_ADDR_RUNLOCK();
6664 		return (0);
6665 	}
6666 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6667 		/*
6668 		 * bound all case: go through all ifns on the vrf
6669 		 */
6670 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6671 			if ((loopback_scope == 0) &&
6672 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6673 				continue;
6674 			}
6675 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6676 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6677 					continue;
6678 				switch (sctp_ifa->address.sa.sa_family) {
6679 #ifdef INET
6680 				case AF_INET:
6681 					if (ipv4_addr_legal) {
6682 						struct sockaddr_in *sin;
6683 
6684 						sin = &sctp_ifa->address.sin;
6685 						if (sin->sin_addr.s_addr == 0) {
6686 							/*
6687 							 * skip unspecified
6688 							 * addrs
6689 							 */
6690 							continue;
6691 						}
6692 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6693 						    &sin->sin_addr) != 0) {
6694 							continue;
6695 						}
6696 						if ((ipv4_local_scope == 0) &&
6697 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6698 							continue;
6699 						}
6700 						/* count this one */
6701 						count++;
6702 					} else {
6703 						continue;
6704 					}
6705 					break;
6706 #endif
6707 #ifdef INET6
6708 				case AF_INET6:
6709 					if (ipv6_addr_legal) {
6710 						struct sockaddr_in6 *sin6;
6711 
6712 						sin6 = &sctp_ifa->address.sin6;
6713 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6714 							continue;
6715 						}
6716 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6717 						    &sin6->sin6_addr) != 0) {
6718 							continue;
6719 						}
6720 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6721 							if (local_scope == 0)
6722 								continue;
6723 							if (sin6->sin6_scope_id == 0) {
6724 								if (sa6_recoverscope(sin6) != 0)
6725 									/* bad link local address */
6734 									continue;
6735 							}
6736 						}
6737 						if ((site_scope == 0) &&
6738 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6739 							continue;
6740 						}
6741 						/* count this one */
6742 						count++;
6743 					}
6744 					break;
6745 #endif
6746 				default:
6747 					/* TSNH */
6748 					break;
6749 				}
6750 			}
6751 		}
6752 	} else {
6753 		/*
6754 		 * subset bound case
6755 		 */
6756 		struct sctp_laddr *laddr;
6757 
6758 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6759 		    sctp_nxt_addr) {
6760 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6761 				continue;
6762 			}
6763 			/* count this one */
6764 			count++;
6765 		}
6766 	}
6767 	SCTP_IPI_ADDR_RUNLOCK();
6768 	return (count);
6769 }
6770 
6771 #if defined(SCTP_LOCAL_TRACE_BUF)
6772 
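/*
 * Append an entry to the circular in-kernel SCTP trace buffer.  The
 * index is advanced with an atomic compare-and-set so that concurrent
 * callers each claim a distinct slot.
 */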
6773 void
6774 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6775 {
6776 	uint32_t saveindex, newindex;
6777 
6778 	do {
6779 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6780 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6781 			newindex = 1;
6782 		} else {
6783 			newindex = saveindex + 1;
6784 		}
6785 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6786 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6787 		saveindex = 0;
6788 	}
6789 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6790 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6791 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6792 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6793 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6794 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6795 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6796 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6797 }
6798 
6799 #endif
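/*
 * Input routine for SCTP packets encapsulated in UDP (RFC 6951).  Strip
 * the UDP header from the received packet, adjust the IP/IPv6 payload
 * length and feed the result into the normal SCTP input path, noting
 * the peer's UDP source port.
 */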
6800 static void
6801 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6802     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6803 {
6804 	struct ip *iph;
6805 #ifdef INET6
6806 	struct ip6_hdr *ip6;
6807 #endif
6808 	struct mbuf *sp, *last;
6809 	struct udphdr *uhdr;
6810 	uint16_t port;
6811 
6812 	if ((m->m_flags & M_PKTHDR) == 0) {
6813 		/* Can't handle one that is not a pkt hdr */
6814 		goto out;
6815 	}
6816 	/* Pull the src port */
6817 	iph = mtod(m, struct ip *);
6818 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6819 	port = uhdr->uh_sport;
6820 	/*
6821 	 * Split out the mbuf chain. Leave the IP header in m, place the
6822 	 * rest in the sp.
6823 	 */
6824 	sp = m_split(m, off, M_NOWAIT);
6825 	if (sp == NULL) {
6826 		/* Gak, drop packet, we can't do a split */
6827 		goto out;
6828 	}
6829 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6830 		/* Gak, packet can't have an SCTP header in it - too small */
6831 		m_freem(sp);
6832 		goto out;
6833 	}
6834 	/* Now pull up the UDP header and SCTP header together */
6835 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6836 	if (sp == NULL) {
6837 		/* Gak pullup failed */
6838 		goto out;
6839 	}
6840 	/* Trim out the UDP header */
6841 	m_adj(sp, sizeof(struct udphdr));
6842 
6843 	/* Now reconstruct the mbuf chain */
6844 	for (last = m; last->m_next; last = last->m_next);
6845 	last->m_next = sp;
6846 	m->m_pkthdr.len += sp->m_pkthdr.len;
6847 	/*
6848 	 * The CSUM_DATA_VALID flag indicates that the HW checked the UDP
6849 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
6850 	 * CSUM_SCTP_VALID, this would imply that the HW also verified the
6851 	 * SCTP checksum. Therefore, clear the bit.
6852 	 */
6853 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6854 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6855 	    m->m_pkthdr.len,
6856 	    if_name(m->m_pkthdr.rcvif),
6857 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6858 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
6859 	iph = mtod(m, struct ip *);
6860 	switch (iph->ip_v) {
6861 #ifdef INET
6862 	case IPVERSION:
6863 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6864 		sctp_input_with_port(m, off, port);
6865 		break;
6866 #endif
6867 #ifdef INET6
6868 	case IPV6_VERSION >> 4:
6869 		ip6 = mtod(m, struct ip6_hdr *);
6870 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6871 		sctp6_input_with_port(&m, &off, port);
6872 		break;
6873 #endif
6874 	default:
6875 		goto out;
6876 		break;
6877 	}
6878 	return;
6879 out:
6880 	m_freem(m);
6881 }
6882 
6883 #ifdef INET
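/*
 * Handle an ICMP error reported for a UDP encapsulated SCTP packet.
 * The embedded UDP/SCTP header is checked against our associations
 * (ports and verification tag) before sctp_notify() is called, so that
 * spoofed ICMP messages are ignored.
 */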
6884 static void
6885 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
6886 {
6887 	struct ip *outer_ip, *inner_ip;
6888 	struct sctphdr *sh;
6889 	struct icmp *icmp;
6890 	struct udphdr *udp;
6891 	struct sctp_inpcb *inp;
6892 	struct sctp_tcb *stcb;
6893 	struct sctp_nets *net;
6894 	struct sctp_init_chunk *ch;
6895 	struct sockaddr_in src, dst;
6896 	uint8_t type, code;
6897 
6898 	inner_ip = (struct ip *)vip;
6899 	icmp = (struct icmp *)((caddr_t)inner_ip -
6900 	    (sizeof(struct icmp) - sizeof(struct ip)));
6901 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
6902 	if (ntohs(outer_ip->ip_len) <
6903 	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
6904 		return;
6905 	}
6906 	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
6907 	sh = (struct sctphdr *)(udp + 1);
6908 	memset(&src, 0, sizeof(struct sockaddr_in));
6909 	src.sin_family = AF_INET;
6910 	src.sin_len = sizeof(struct sockaddr_in);
6911 	src.sin_port = sh->src_port;
6912 	src.sin_addr = inner_ip->ip_src;
6913 	memset(&dst, 0, sizeof(struct sockaddr_in));
6914 	dst.sin_family = AF_INET;
6915 	dst.sin_len = sizeof(struct sockaddr_in);
6916 	dst.sin_port = sh->dest_port;
6917 	dst.sin_addr = inner_ip->ip_dst;
6918 	/*
6919 	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
6920 	 * holds our local endpoint address. Thus we reverse the dst and the
6921 	 * src in the lookup.
6922 	 */
6923 	inp = NULL;
6924 	net = NULL;
6925 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
6926 	    (struct sockaddr *)&src,
6927 	    &inp, &net, 1,
6928 	    SCTP_DEFAULT_VRFID);
6929 	if ((stcb != NULL) &&
6930 	    (net != NULL) &&
6931 	    (inp != NULL)) {
6932 		/* Check the UDP port numbers */
6933 		if ((udp->uh_dport != net->port) ||
6934 		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
6935 			SCTP_TCB_UNLOCK(stcb);
6936 			return;
6937 		}
6938 		/* Check the verification tag */
6939 		if (ntohl(sh->v_tag) != 0) {
6940 			/*
6941 			 * This must be the verification tag used for
6942 			 * sending out packets. We don't consider packets
6943 			 * reflecting the verification tag.
6944 			 */
6945 			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
6946 				SCTP_TCB_UNLOCK(stcb);
6947 				return;
6948 			}
6949 		} else {
6950 			if (ntohs(outer_ip->ip_len) >=
6951 			    sizeof(struct ip) +
6952 			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
6953 				/*
6954 				 * In this case we can check if we got an
6955 				 * INIT chunk and if the initiate tag
6956 				 * matches.
6957 				 */
6958 				ch = (struct sctp_init_chunk *)(sh + 1);
6959 				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
6960 				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
6961 					SCTP_TCB_UNLOCK(stcb);
6962 					return;
6963 				}
6964 			} else {
6965 				SCTP_TCB_UNLOCK(stcb);
6966 				return;
6967 			}
6968 		}
6969 		type = icmp->icmp_type;
6970 		code = icmp->icmp_code;
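		/*
		 * A port unreachable refers to the encapsulating UDP port,
		 * so report it upward as if the SCTP protocol itself were
		 * not available at the peer.
		 */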
6971 		if ((type == ICMP_UNREACH) &&
6972 		    (code == ICMP_UNREACH_PORT)) {
6973 			code = ICMP_UNREACH_PROTOCOL;
6974 		}
6975 		sctp_notify(inp, stcb, net, type, code,
6976 		    ntohs(inner_ip->ip_len),
6977 		    (uint32_t)ntohs(icmp->icmp_nextmtu));
6978 	} else {
6979 		if ((stcb == NULL) && (inp != NULL)) {
6980 			/* reduce ref-count */
6981 			SCTP_INP_WLOCK(inp);
6982 			SCTP_INP_DECR_REF(inp);
6983 			SCTP_INP_WUNLOCK(inp);
6984 		}
6985 		if (stcb) {
6986 			SCTP_TCB_UNLOCK(stcb);
6987 		}
6988 	}
6989 	return;
6990 }
6991 #endif
6992 
6993 #ifdef INET6
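/*
 * ICMPv6 error handler installed on the IPv6 UDP tunneling socket.  The
 * headers quoted in the ICMPv6 message are copied out of the mbuf chain
 * with m_copydata(); if the ports and the verification tag match an
 * existing association, the error is forwarded via sctp6_notify().
 */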
6994 static void
6995 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
6996 {
6997 	struct ip6ctlparam *ip6cp;
6998 	struct sctp_inpcb *inp;
6999 	struct sctp_tcb *stcb;
7000 	struct sctp_nets *net;
7001 	struct sctphdr sh;
7002 	struct udphdr udp;
7003 	struct sockaddr_in6 src, dst;
7004 	uint8_t type, code;
7005 
7006 	ip6cp = (struct ip6ctlparam *)d;
7007 	/*
7008 	 * XXX: We assume that when ip6c_ip6 is non-NULL, ip6c_m and ip6c_off are valid.
7009 	 */
7010 	if (ip6cp->ip6c_m == NULL) {
7011 		return;
7012 	}
7013 	/*
7014 	 * Check if we can safely examine the ports and the verification tag
7015 	 * of the SCTP common header.
7016 	 */
7017 	if (ip6cp->ip6c_m->m_pkthdr.len <
7018 	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
7019 		return;
7020 	}
7021 	/* Copy out the UDP header. */
7022 	memset(&udp, 0, sizeof(struct udphdr));
7023 	m_copydata(ip6cp->ip6c_m,
7024 	    ip6cp->ip6c_off,
7025 	    sizeof(struct udphdr),
7026 	    (caddr_t)&udp);
7027 	/* Copy out the port numbers and the verification tag. */
7028 	memset(&sh, 0, sizeof(struct sctphdr));
7029 	m_copydata(ip6cp->ip6c_m,
7030 	    ip6cp->ip6c_off + sizeof(struct udphdr),
7031 	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
7032 	    (caddr_t)&sh);
7033 	memset(&src, 0, sizeof(struct sockaddr_in6));
7034 	src.sin6_family = AF_INET6;
7035 	src.sin6_len = sizeof(struct sockaddr_in6);
7036 	src.sin6_port = sh.src_port;
7037 	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
7038 	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7039 		return;
7040 	}
7041 	memset(&dst, 0, sizeof(struct sockaddr_in6));
7042 	dst.sin6_family = AF_INET6;
7043 	dst.sin6_len = sizeof(struct sockaddr_in6);
7044 	dst.sin6_port = sh.dest_port;
7045 	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7046 	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7047 		return;
7048 	}
7049 	inp = NULL;
7050 	net = NULL;
7051 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7052 	    (struct sockaddr *)&src,
7053 	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7054 	if ((stcb != NULL) &&
7055 	    (net != NULL) &&
7056 	    (inp != NULL)) {
7057 		/* Check the UDP port numbers */
7058 		if ((udp.uh_dport != net->port) ||
7059 		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7060 			SCTP_TCB_UNLOCK(stcb);
7061 			return;
7062 		}
7063 		/* Check the verification tag */
7064 		if (ntohl(sh.v_tag) != 0) {
7065 			/*
7066 			 * This must be the verification tag used for
7067 			 * sending out packets. We don't consider packets
7068 			 * reflecting the verification tag.
7069 			 */
7070 			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7071 				SCTP_TCB_UNLOCK(stcb);
7072 				return;
7073 			}
7074 		} else {
7075 			if (ip6cp->ip6c_m->m_pkthdr.len >=
7076 			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7077 			    sizeof(struct sctphdr) +
7078 			    sizeof(struct sctp_chunkhdr) +
7079 			    offsetof(struct sctp_init, a_rwnd)) {
7080 				/*
7081 				 * In this case we can check if we got an
7082 				 * INIT chunk and if the initiate tag
7083 				 * matches.
7084 				 */
7085 				uint32_t initiate_tag;
7086 				uint8_t chunk_type;
7087 
7088 				m_copydata(ip6cp->ip6c_m,
7089 				    ip6cp->ip6c_off +
7090 				    sizeof(struct udphdr) +
7091 				    sizeof(struct sctphdr),
7092 				    sizeof(uint8_t),
7093 				    (caddr_t)&chunk_type);
7094 				m_copydata(ip6cp->ip6c_m,
7095 				    ip6cp->ip6c_off +
7096 				    sizeof(struct udphdr) +
7097 				    sizeof(struct sctphdr) +
7098 				    sizeof(struct sctp_chunkhdr),
7099 				    sizeof(uint32_t),
7100 				    (caddr_t)&initiate_tag);
7101 				if ((chunk_type != SCTP_INITIATION) ||
7102 				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7103 					SCTP_TCB_UNLOCK(stcb);
7104 					return;
7105 				}
7106 			} else {
7107 				SCTP_TCB_UNLOCK(stcb);
7108 				return;
7109 			}
7110 		}
7111 		type = ip6cp->ip6c_icmp6->icmp6_type;
7112 		code = ip6cp->ip6c_icmp6->icmp6_code;
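		/*
		 * A port unreachable refers to the encapsulating UDP port;
		 * map it to an unrecognized next header error so that it
		 * can be handled like the SCTP transport being unavailable
		 * at the peer.
		 */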
7113 		if ((type == ICMP6_DST_UNREACH) &&
7114 		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7115 			type = ICMP6_PARAM_PROB;
7116 			code = ICMP6_PARAMPROB_NEXTHEADER;
7117 		}
7118 		sctp6_notify(inp, stcb, net, type, code,
7119 		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7120 	} else {
7121 		if ((stcb == NULL) && (inp != NULL)) {
7122 			/* reduce inp's ref-count */
7123 			SCTP_INP_WLOCK(inp);
7124 			SCTP_INP_DECR_REF(inp);
7125 			SCTP_INP_WUNLOCK(inp);
7126 		}
7127 		if (stcb) {
7128 			SCTP_TCB_UNLOCK(stcb);
7129 		}
7130 	}
7131 }
7132 #endif
7133 
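/*
 * Close the kernel UDP tunneling sockets (one per address family), if they
 * are open.
 */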
7134 void
7135 sctp_over_udp_stop(void)
7136 {
7137 	/*
7138 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
7139 	 * for writing!
7140 	 */
7141 #ifdef INET
7142 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7143 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7144 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7145 	}
7146 #endif
7147 #ifdef INET6
7148 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7149 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7150 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7151 	}
7152 #endif
7153 }
7154 
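/*
 * Create the kernel UDP tunneling sockets, install
 * sctp_recv_udp_tunneled_packet() and the ICMP handlers on them via
 * udp_set_kernel_tunneling(), and bind them to the configured tunneling
 * port.  Typically reached through the net.inet.sctp.udp_tunneling_port
 * sysctl handler.
 */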
7155 int
7156 sctp_over_udp_start(void)
7157 {
7158 	uint16_t port;
7159 	int ret;
7160 #ifdef INET
7161 	struct sockaddr_in sin;
7162 #endif
7163 #ifdef INET6
7164 	struct sockaddr_in6 sin6;
7165 #endif
7166 	/*
7167 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
7168 	 * for writing!
7169 	 */
7170 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7171 	if (ntohs(port) == 0) {
7172 		/* Must have a port set */
7173 		return (EINVAL);
7174 	}
7175 #ifdef INET
7176 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7177 		/* Already running -- must stop first */
7178 		return (EALREADY);
7179 	}
7180 #endif
7181 #ifdef INET6
7182 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7183 		/* Already running -- must stop first */
7184 		return (EALREADY);
7185 	}
7186 #endif
7187 #ifdef INET
7188 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7189 	    SOCK_DGRAM, IPPROTO_UDP,
7190 	    curthread->td_ucred, curthread))) {
7191 		sctp_over_udp_stop();
7192 		return (ret);
7193 	}
7194 	/* Call the special UDP hook. */
7195 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7196 	    sctp_recv_udp_tunneled_packet,
7197 	    sctp_recv_icmp_tunneled_packet,
7198 	    NULL))) {
7199 		sctp_over_udp_stop();
7200 		return (ret);
7201 	}
7202 	/* Ok, we have a socket, bind it to the port. */
7203 	memset(&sin, 0, sizeof(struct sockaddr_in));
7204 	sin.sin_len = sizeof(struct sockaddr_in);
7205 	sin.sin_family = AF_INET;
7206 	sin.sin_port = htons(port);
7207 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7208 	    (struct sockaddr *)&sin, curthread))) {
7209 		sctp_over_udp_stop();
7210 		return (ret);
7211 	}
7212 #endif
7213 #ifdef INET6
7214 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7215 	    SOCK_DGRAM, IPPROTO_UDP,
7216 	    curthread->td_ucred, curthread))) {
7217 		sctp_over_udp_stop();
7218 		return (ret);
7219 	}
7220 	/* Call the special UDP hook. */
7221 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7222 	    sctp_recv_udp_tunneled_packet,
7223 	    sctp_recv_icmp6_tunneled_packet,
7224 	    NULL))) {
7225 		sctp_over_udp_stop();
7226 		return (ret);
7227 	}
7228 	/* Ok, we have a socket, bind it to the port. */
7229 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7230 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7231 	sin6.sin6_family = AF_INET6;
7232 	sin6.sin6_port = htons(port);
7233 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7234 	    (struct sockaddr *)&sin6, curthread))) {
7235 		sctp_over_udp_stop();
7236 		return (ret);
7237 	}
7238 #endif
7239 	return (0);
7240 }
7241 
7242 #if defined(INET6) || defined(INET)
7243 
7244 /*
7245  * sctp_min_mtu() returns the minimum of all non-zero arguments.
7246  * If all arguments are zero, zero is returned.
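 * For example, sctp_min_mtu(1500, 0, 1280) returns 1280 and
 * sctp_min_mtu(0, 0, 0) returns 0.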
7247  */
7248 uint32_t
7249 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7250 {
7251 	if (mtu1 > 0) {
7252 		if (mtu2 > 0) {
7253 			if (mtu3 > 0) {
7254 				return (min(mtu1, min(mtu2, mtu3)));
7255 			} else {
7256 				return (min(mtu1, mtu2));
7257 			}
7258 		} else {
7259 			if (mtu3 > 0) {
7260 				return (min(mtu1, mtu3));
7261 			} else {
7262 				return (mtu1);
7263 			}
7264 		}
7265 	} else {
7266 		if (mtu2 > 0) {
7267 			if (mtu3 > 0) {
7268 				return (min(mtu2, mtu3));
7269 			} else {
7270 				return (mtu2);
7271 			}
7272 		} else {
7273 			return (mtu3);
7274 		}
7275 	}
7276 }
7277 
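/*
 * Record the path MTU for the given peer address in the TCP host cache,
 * keyed by the address and the FIB number.
 */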
7278 void
7279 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7280 {
7281 	struct in_conninfo inc;
7282 
7283 	memset(&inc, 0, sizeof(struct in_conninfo));
7284 	inc.inc_fibnum = fibnum;
7285 	switch (addr->sa.sa_family) {
7286 #ifdef INET
7287 	case AF_INET:
7288 		inc.inc_faddr = addr->sin.sin_addr;
7289 		break;
7290 #endif
7291 #ifdef INET6
7292 	case AF_INET6:
7293 		inc.inc_flags |= INC_ISIPV6;
7294 		inc.inc6_faddr = addr->sin6.sin6_addr;
7295 		break;
7296 #endif
7297 	default:
7298 		return;
7299 	}
7300 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7301 }
7302 
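/*
 * Look up the path MTU cached for the given peer address in the TCP host
 * cache.  Returns 0 if there is no cached value or the address family is
 * not supported.
 */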
7303 uint32_t
7304 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7305 {
7306 	struct in_conninfo inc;
7307 
7308 	memset(&inc, 0, sizeof(struct in_conninfo));
7309 	inc.inc_fibnum = fibnum;
7310 	switch (addr->sa.sa_family) {
7311 #ifdef INET
7312 	case AF_INET:
7313 		inc.inc_faddr = addr->sin.sin_addr;
7314 		break;
7315 #endif
7316 #ifdef INET6
7317 	case AF_INET6:
7318 		inc.inc_flags |= INC_ISIPV6;
7319 		inc.inc6_faddr = addr->sin6.sin6_addr;
7320 		break;
7321 #endif
7322 	default:
7323 		return (0);
7324 	}
7325 	return ((uint32_t)tcp_hc_getmtu(&inc));
7326 }
7327 #endif
7328