xref: /freebsd/sys/netinet/sctputil.c (revision 298022457a9a016cbdda4e22d751abb5cd91c919)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>	/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55 
56 
57 #ifndef KTR_SCTP
58 #define KTR_SCTP KTR_SUBSYS
59 #endif
60 
61 extern struct sctp_cc_functions sctp_cc_functions[];
62 extern struct sctp_ss_functions sctp_ss_functions[];
63 
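/*
 * Note: most of the sctp_*log*() helpers below follow the same pattern:
 * the values of interest are packed into the matching member of the
 * sctp_cwnd_log union and then emitted through the KTR tracing facility
 * via SCTP_CTR6(), reading the packed bits back out through the
 * overlapping x.misc.log1..log4 words.
 */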
64 void
65 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
66 {
67 	struct sctp_cwnd_log sctp_clog;
68 
69 	sctp_clog.x.sb.stcb = stcb;
70 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
71 	if (stcb)
72 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
73 	else
74 		sctp_clog.x.sb.stcb_sbcc = 0;
75 	sctp_clog.x.sb.incr = incr;
76 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
77 	    SCTP_LOG_EVENT_SB,
78 	    from,
79 	    sctp_clog.x.misc.log1,
80 	    sctp_clog.x.misc.log2,
81 	    sctp_clog.x.misc.log3,
82 	    sctp_clog.x.misc.log4);
83 }
84 
85 void
86 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
87 {
88 	struct sctp_cwnd_log sctp_clog;
89 
90 	sctp_clog.x.close.inp = (void *)inp;
91 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
92 	if (stcb) {
93 		sctp_clog.x.close.stcb = (void *)stcb;
94 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
95 	} else {
96 		sctp_clog.x.close.stcb = 0;
97 		sctp_clog.x.close.state = 0;
98 	}
99 	sctp_clog.x.close.loc = loc;
100 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
101 	    SCTP_LOG_EVENT_CLOSE,
102 	    0,
103 	    sctp_clog.x.misc.log1,
104 	    sctp_clog.x.misc.log2,
105 	    sctp_clog.x.misc.log3,
106 	    sctp_clog.x.misc.log4);
107 }
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 }
125 
126 void
127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128 {
129 	struct sctp_cwnd_log sctp_clog;
130 
131 	sctp_clog.x.strlog.stcb = stcb;
132 	sctp_clog.x.strlog.n_tsn = tsn;
133 	sctp_clog.x.strlog.n_sseq = sseq;
134 	sctp_clog.x.strlog.e_tsn = 0;
135 	sctp_clog.x.strlog.e_sseq = 0;
136 	sctp_clog.x.strlog.strm = stream;
137 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138 	    SCTP_LOG_EVENT_STRM,
139 	    from,
140 	    sctp_clog.x.misc.log1,
141 	    sctp_clog.x.misc.log2,
142 	    sctp_clog.x.misc.log3,
143 	    sctp_clog.x.misc.log4);
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 void
166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167 {
168 	struct sctp_cwnd_log sctp_clog;
169 
170 	sctp_clog.x.sack.cumack = cumack;
171 	sctp_clog.x.sack.oldcumack = old_cumack;
172 	sctp_clog.x.sack.tsn = tsn;
173 	sctp_clog.x.sack.numGaps = gaps;
174 	sctp_clog.x.sack.numDups = dups;
175 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176 	    SCTP_LOG_EVENT_SACK,
177 	    from,
178 	    sctp_clog.x.misc.log1,
179 	    sctp_clog.x.misc.log2,
180 	    sctp_clog.x.misc.log3,
181 	    sctp_clog.x.misc.log4);
182 }
183 
184 void
185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186 {
187 	struct sctp_cwnd_log sctp_clog;
188 
189 	memset(&sctp_clog, 0, sizeof(sctp_clog));
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
204 {
205 	struct sctp_cwnd_log sctp_clog;
206 
207 	memset(&sctp_clog, 0, sizeof(sctp_clog));
208 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210 	sctp_clog.x.fr.tsn = tsn;
211 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212 	    SCTP_LOG_EVENT_FR,
213 	    from,
214 	    sctp_clog.x.misc.log1,
215 	    sctp_clog.x.misc.log2,
216 	    sctp_clog.x.misc.log3,
217 	    sctp_clog.x.misc.log4);
218 }
219 
220 #ifdef SCTP_MBUF_LOGGING
221 void
222 sctp_log_mb(struct mbuf *m, int from)
223 {
224 	struct sctp_cwnd_log sctp_clog;
225 
226 	sctp_clog.x.mb.mp = m;
227 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
228 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
229 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
230 	if (SCTP_BUF_IS_EXTENDED(m)) {
231 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
232 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
233 	} else {
234 		sctp_clog.x.mb.ext = 0;
235 		sctp_clog.x.mb.refcnt = 0;
236 	}
237 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
238 	    SCTP_LOG_EVENT_MBUF,
239 	    from,
240 	    sctp_clog.x.misc.log1,
241 	    sctp_clog.x.misc.log2,
242 	    sctp_clog.x.misc.log3,
243 	    sctp_clog.x.misc.log4);
244 }
245 
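/* Log every mbuf in the chain starting at m. */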
246 void
247 sctp_log_mbc(struct mbuf *m, int from)
248 {
249 	struct mbuf *mat;
250 
251 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
252 		sctp_log_mb(mat, from);
253 	}
254 }
255 
256 #endif
257 
258 void
259 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
260 {
261 	struct sctp_cwnd_log sctp_clog;
262 
263 	if (control == NULL) {
264 		SCTP_PRINTF("Gak log of NULL?\n");
265 		return;
266 	}
267 	sctp_clog.x.strlog.stcb = control->stcb;
268 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
269 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
270 	sctp_clog.x.strlog.strm = control->sinfo_stream;
271 	if (poschk != NULL) {
272 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
273 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
274 	} else {
275 		sctp_clog.x.strlog.e_tsn = 0;
276 		sctp_clog.x.strlog.e_sseq = 0;
277 	}
278 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
279 	    SCTP_LOG_EVENT_STRM,
280 	    from,
281 	    sctp_clog.x.misc.log1,
282 	    sctp_clog.x.misc.log2,
283 	    sctp_clog.x.misc.log3,
284 	    sctp_clog.x.misc.log4);
285 }
286 
287 void
288 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
289 {
290 	struct sctp_cwnd_log sctp_clog;
291 
292 	sctp_clog.x.cwnd.net = net;
293 	if (stcb->asoc.send_queue_cnt > 255)
294 		sctp_clog.x.cwnd.cnt_in_send = 255;
295 	else
296 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
297 	if (stcb->asoc.stream_queue_cnt > 255)
298 		sctp_clog.x.cwnd.cnt_in_str = 255;
299 	else
300 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
301 
302 	if (net) {
303 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
304 		sctp_clog.x.cwnd.inflight = net->flight_size;
305 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
306 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
307 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
308 	}
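	/*
	 * For PRESEND events the meets_pseudo_cumack slot is reused to
	 * carry the peer's receive window.
	 */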
309 	if (SCTP_CWNDLOG_PRESEND == from) {
310 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
311 	}
312 	sctp_clog.x.cwnd.cwnd_augment = augment;
313 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
314 	    SCTP_LOG_EVENT_CWND,
315 	    from,
316 	    sctp_clog.x.misc.log1,
317 	    sctp_clog.x.misc.log2,
318 	    sctp_clog.x.misc.log3,
319 	    sctp_clog.x.misc.log4);
320 }
321 
322 void
323 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
324 {
325 	struct sctp_cwnd_log sctp_clog;
326 
327 	memset(&sctp_clog, 0, sizeof(sctp_clog));
328 	if (inp) {
329 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
330 
331 	} else {
332 		sctp_clog.x.lock.sock = (void *)NULL;
333 	}
334 	sctp_clog.x.lock.inp = (void *)inp;
335 	if (stcb) {
336 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
337 	} else {
338 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
339 	}
340 	if (inp) {
341 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
342 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
343 	} else {
344 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
345 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
346 	}
347 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
348 	if (inp && (inp->sctp_socket)) {
349 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
350 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
351 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
352 	} else {
353 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
354 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
355 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
356 	}
357 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
358 	    SCTP_LOG_LOCK_EVENT,
359 	    from,
360 	    sctp_clog.x.misc.log1,
361 	    sctp_clog.x.misc.log2,
362 	    sctp_clog.x.misc.log3,
363 	    sctp_clog.x.misc.log4);
364 }
365 
366 void
367 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
368 {
369 	struct sctp_cwnd_log sctp_clog;
370 
371 	memset(&sctp_clog, 0, sizeof(sctp_clog));
372 	sctp_clog.x.cwnd.net = net;
373 	sctp_clog.x.cwnd.cwnd_new_value = error;
374 	sctp_clog.x.cwnd.inflight = net->flight_size;
375 	sctp_clog.x.cwnd.cwnd_augment = burst;
376 	if (stcb->asoc.send_queue_cnt > 255)
377 		sctp_clog.x.cwnd.cnt_in_send = 255;
378 	else
379 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
380 	if (stcb->asoc.stream_queue_cnt > 255)
381 		sctp_clog.x.cwnd.cnt_in_str = 255;
382 	else
383 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
384 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
385 	    SCTP_LOG_EVENT_MAXBURST,
386 	    from,
387 	    sctp_clog.x.misc.log1,
388 	    sctp_clog.x.misc.log2,
389 	    sctp_clog.x.misc.log3,
390 	    sctp_clog.x.misc.log4);
391 }
392 
393 void
394 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
395 {
396 	struct sctp_cwnd_log sctp_clog;
397 
398 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
399 	sctp_clog.x.rwnd.send_size = snd_size;
400 	sctp_clog.x.rwnd.overhead = overhead;
401 	sctp_clog.x.rwnd.new_rwnd = 0;
402 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
403 	    SCTP_LOG_EVENT_RWND,
404 	    from,
405 	    sctp_clog.x.misc.log1,
406 	    sctp_clog.x.misc.log2,
407 	    sctp_clog.x.misc.log3,
408 	    sctp_clog.x.misc.log4);
409 }
410 
411 void
412 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
413 {
414 	struct sctp_cwnd_log sctp_clog;
415 
416 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
417 	sctp_clog.x.rwnd.send_size = flight_size;
418 	sctp_clog.x.rwnd.overhead = overhead;
419 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
420 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
421 	    SCTP_LOG_EVENT_RWND,
422 	    from,
423 	    sctp_clog.x.misc.log1,
424 	    sctp_clog.x.misc.log2,
425 	    sctp_clog.x.misc.log3,
426 	    sctp_clog.x.misc.log4);
427 }
428 
429 #ifdef SCTP_MBCNT_LOGGING
430 static void
431 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
432 {
433 	struct sctp_cwnd_log sctp_clog;
434 
435 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
436 	sctp_clog.x.mbcnt.size_change = book;
437 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
438 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
439 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
440 	    SCTP_LOG_EVENT_MBCNT,
441 	    from,
442 	    sctp_clog.x.misc.log1,
443 	    sctp_clog.x.misc.log2,
444 	    sctp_clog.x.misc.log3,
445 	    sctp_clog.x.misc.log4);
446 }
447 
448 #endif
449 
450 void
451 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
452 {
453 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
454 	    SCTP_LOG_MISC_EVENT,
455 	    from,
456 	    a, b, c, d);
457 }
458 
459 void
460 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
461 {
462 	struct sctp_cwnd_log sctp_clog;
463 
464 	sctp_clog.x.wake.stcb = (void *)stcb;
465 	sctp_clog.x.wake.wake_cnt = wake_cnt;
466 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
467 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
468 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
469 
470 	if (stcb->asoc.stream_queue_cnt < 0xff)
471 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
472 	else
473 		sctp_clog.x.wake.stream_qcnt = 0xff;
474 
475 	if (stcb->asoc.chunks_on_out_queue < 0xff)
476 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
477 	else
478 		sctp_clog.x.wake.chunks_on_oque = 0xff;
479 
480 	sctp_clog.x.wake.sctpflags = 0;
481 	/* set in the deferred mode stuff */
482 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
483 		sctp_clog.x.wake.sctpflags |= 1;
484 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
485 		sctp_clog.x.wake.sctpflags |= 2;
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
487 		sctp_clog.x.wake.sctpflags |= 4;
488 	/* what about the sb */
489 	if (stcb->sctp_socket) {
490 		struct socket *so = stcb->sctp_socket;
491 
492 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
493 	} else {
494 		sctp_clog.x.wake.sbflags = 0xff;
495 	}
496 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
497 	    SCTP_LOG_EVENT_WAKE,
498 	    from,
499 	    sctp_clog.x.misc.log1,
500 	    sctp_clog.x.misc.log2,
501 	    sctp_clog.x.misc.log3,
502 	    sctp_clog.x.misc.log4);
503 }
504 
505 void
506 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
507 {
508 	struct sctp_cwnd_log sctp_clog;
509 
510 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
511 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
512 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
513 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
514 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
515 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
516 	sctp_clog.x.blk.sndlen = sendlen;
517 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
518 	    SCTP_LOG_EVENT_BLOCK,
519 	    from,
520 	    sctp_clog.x.misc.log1,
521 	    sctp_clog.x.misc.log2,
522 	    sctp_clog.x.misc.log3,
523 	    sctp_clog.x.misc.log4);
524 }
525 
526 int
527 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
528 {
529 	/* May need to fix this if ktrdump does not work */
530 	return (0);
531 }
532 
533 #ifdef SCTP_AUDITING_ENABLED
534 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
535 static int sctp_audit_indx = 0;
536 
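/*
 * The audit log is a circular buffer of two-byte records.  sctp_auditing()
 * appends an 0xAA record tagged with its caller, cross-checks the
 * retransmit count and flight-size bookkeeping against the sent queue,
 * and appends 0xAF records (followed by a printed report) whenever a
 * mismatch is found and corrected.
 */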
537 static
538 void
539 sctp_print_audit_report(void)
540 {
541 	int i;
542 	int cnt;
543 
544 	cnt = 0;
545 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
546 		if ((sctp_audit_data[i][0] == 0xe0) &&
547 		    (sctp_audit_data[i][1] == 0x01)) {
548 			cnt = 0;
549 			SCTP_PRINTF("\n");
550 		} else if (sctp_audit_data[i][0] == 0xf0) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
554 		    (sctp_audit_data[i][1] == 0x01)) {
555 			SCTP_PRINTF("\n");
556 			cnt = 0;
557 		}
558 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
559 		    (uint32_t) sctp_audit_data[i][1]);
560 		cnt++;
561 		if ((cnt % 14) == 0)
562 			SCTP_PRINTF("\n");
563 	}
564 	for (i = 0; i < sctp_audit_indx; i++) {
565 		if ((sctp_audit_data[i][0] == 0xe0) &&
566 		    (sctp_audit_data[i][1] == 0x01)) {
567 			cnt = 0;
568 			SCTP_PRINTF("\n");
569 		} else if (sctp_audit_data[i][0] == 0xf0) {
570 			cnt = 0;
571 			SCTP_PRINTF("\n");
572 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
573 		    (sctp_audit_data[i][1] == 0x01)) {
574 			SCTP_PRINTF("\n");
575 			cnt = 0;
576 		}
577 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
578 		    (uint32_t) sctp_audit_data[i][1]);
579 		cnt++;
580 		if ((cnt % 14) == 0)
581 			SCTP_PRINTF("\n");
582 	}
583 	SCTP_PRINTF("\n");
584 }
585 
586 void
587 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
588     struct sctp_nets *net)
589 {
590 	int resend_cnt, tot_out, rep, tot_book_cnt;
591 	struct sctp_nets *lnet;
592 	struct sctp_tmit_chunk *chk;
593 
594 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
595 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
596 	sctp_audit_indx++;
597 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
598 		sctp_audit_indx = 0;
599 	}
600 	if (inp == NULL) {
601 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
602 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
603 		sctp_audit_indx++;
604 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
605 			sctp_audit_indx = 0;
606 		}
607 		return;
608 	}
609 	if (stcb == NULL) {
610 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
611 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
612 		sctp_audit_indx++;
613 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
614 			sctp_audit_indx = 0;
615 		}
616 		return;
617 	}
618 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
619 	sctp_audit_data[sctp_audit_indx][1] =
620 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
621 	sctp_audit_indx++;
622 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
623 		sctp_audit_indx = 0;
624 	}
625 	rep = 0;
626 	tot_book_cnt = 0;
627 	resend_cnt = tot_out = 0;
628 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
629 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
630 			resend_cnt++;
631 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
632 			tot_out += chk->book_size;
633 			tot_book_cnt++;
634 		}
635 	}
636 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
637 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
638 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
639 		sctp_audit_indx++;
640 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
641 			sctp_audit_indx = 0;
642 		}
643 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
644 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
645 		rep = 1;
646 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
647 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
648 		sctp_audit_data[sctp_audit_indx][1] =
649 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
650 		sctp_audit_indx++;
651 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
652 			sctp_audit_indx = 0;
653 		}
654 	}
655 	if (tot_out != stcb->asoc.total_flight) {
656 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
657 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
658 		sctp_audit_indx++;
659 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
660 			sctp_audit_indx = 0;
661 		}
662 		rep = 1;
663 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
664 		    (int)stcb->asoc.total_flight);
665 		stcb->asoc.total_flight = tot_out;
666 	}
667 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
668 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
669 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
670 		sctp_audit_indx++;
671 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
672 			sctp_audit_indx = 0;
673 		}
674 		rep = 1;
675 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
676 
677 		stcb->asoc.total_flight_count = tot_book_cnt;
678 	}
679 	tot_out = 0;
680 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
681 		tot_out += lnet->flight_size;
682 	}
683 	if (tot_out != stcb->asoc.total_flight) {
684 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
685 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
686 		sctp_audit_indx++;
687 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
688 			sctp_audit_indx = 0;
689 		}
690 		rep = 1;
691 		SCTP_PRINTF("real flight:%d net total was %d\n",
692 		    stcb->asoc.total_flight, tot_out);
693 		/* now corrective action */
694 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
695 
696 			tot_out = 0;
697 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
698 				if ((chk->whoTo == lnet) &&
699 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
700 					tot_out += chk->book_size;
701 				}
702 			}
703 			if (lnet->flight_size != tot_out) {
704 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
705 				    (void *)lnet, lnet->flight_size,
706 				    tot_out);
707 				lnet->flight_size = tot_out;
708 			}
709 		}
710 	}
711 	if (rep) {
712 		sctp_print_audit_report();
713 	}
714 }
715 
716 void
717 sctp_audit_log(uint8_t ev, uint8_t fd)
718 {
719 
720 	sctp_audit_data[sctp_audit_indx][0] = ev;
721 	sctp_audit_data[sctp_audit_indx][1] = fd;
722 	sctp_audit_indx++;
723 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
724 		sctp_audit_indx = 0;
725 	}
726 }
727 
728 #endif
729 
730 /*
731  * sctp_stop_timers_for_shutdown() should be called
732  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
733  * state to make sure that all timers are stopped.
734  */
735 void
736 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
737 {
738 	struct sctp_association *asoc;
739 	struct sctp_nets *net;
740 
741 	asoc = &stcb->asoc;
742 
743 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
744 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
745 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
746 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
747 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
748 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
749 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
750 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
751 	}
752 }
753 
754 /*
755  * A list of sizes based on typical MTUs, used only if the next hop size
756  * is not returned.
757  */
758 static uint32_t sctp_mtu_sizes[] = {
759 	68,
760 	296,
761 	508,
762 	512,
763 	544,
764 	576,
765 	1006,
766 	1492,
767 	1500,
768 	1536,
769 	2002,
770 	2048,
771 	4352,
772 	4464,
773 	8166,
774 	17914,
775 	32000,
776 	65535
777 };
778 
779 /*
780  * Return the largest MTU smaller than val. If there is no
781  * entry, just return val.
782  */
783 uint32_t
784 sctp_get_prev_mtu(uint32_t val)
785 {
786 	uint32_t i;
787 
788 	if (val <= sctp_mtu_sizes[0]) {
789 		return (val);
790 	}
791 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
792 		if (val <= sctp_mtu_sizes[i]) {
793 			break;
794 		}
795 	}
796 	return (sctp_mtu_sizes[i - 1]);
797 }
798 
799 /*
800  * Return the smallest MTU larger than val. If there is no
801  * entry, just return val.
802  */
803 uint32_t
804 sctp_get_next_mtu(uint32_t val)
805 {
806 	/* select another MTU that is just bigger than this one */
807 	uint32_t i;
808 
809 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
810 		if (val < sctp_mtu_sizes[i]) {
811 			return (sctp_mtu_sizes[i]);
812 		}
813 	}
814 	return (val);
815 }
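/*
 * Worked examples using the table above: sctp_get_prev_mtu(1400) returns
 * 1006 and sctp_get_next_mtu(1400) returns 1492.  For a value that is
 * itself a table entry, e.g. 1500, the results are 1492 and 1536.  Values
 * at or below 68 are returned unchanged by sctp_get_prev_mtu(), and values
 * at or above 65535 are returned unchanged by sctp_get_next_mtu().
 */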
816 
817 void
818 sctp_fill_random_store(struct sctp_pcb *m)
819 {
820 	/*
821 	 * Here we use MD5/SHA-1 to hash our good random numbers with our
822 	 * counter. The result becomes our good random numbers and we then
823 	 * set up to give these out. Note that we do no locking to protect
824 	 * this. This is OK, since if competing callers get here we will just
825 	 * get more gobbledygook in the random store, which is what we want.
826 	 * There is a danger that two callers will use the same random
827 	 * numbers, but that's OK too since that is random as well :->
828 	 */
829 	m->store_at = 0;
830 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
831 	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
832 	    sizeof(m->random_counter), (uint8_t *) m->random_store);
833 	m->random_counter++;
834 }
835 
836 uint32_t
837 sctp_select_initial_TSN(struct sctp_pcb *inp)
838 {
839 	/*
840 	 * A true implementation should use a random selection process to
841 	 * get the initial stream sequence number, using RFC 1750 as a good
842 	 * guideline.
843 	 */
844 	uint32_t x, *xp;
845 	uint8_t *p;
846 	int store_at, new_store;
847 
848 	if (inp->initial_sequence_debug != 0) {
849 		uint32_t ret;
850 
851 		ret = inp->initial_sequence_debug;
852 		inp->initial_sequence_debug++;
853 		return (ret);
854 	}
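	/*
	 * Claim a 4-byte slice of the random store without taking a lock:
	 * advance inp->store_at with an atomic compare-and-swap, retrying
	 * if another thread raced us, and refill the store once the index
	 * wraps back to 0.
	 */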
855 retry:
856 	store_at = inp->store_at;
857 	new_store = store_at + sizeof(uint32_t);
858 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
859 		new_store = 0;
860 	}
861 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
862 		goto retry;
863 	}
864 	if (new_store == 0) {
865 		/* Refill the random store */
866 		sctp_fill_random_store(inp);
867 	}
868 	p = &inp->random_store[store_at];
869 	xp = (uint32_t *) p;
870 	x = *xp;
871 	return (x);
872 }
873 
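/*
 * Select a verification tag for the given lport/rport pair.  Zero is never
 * used; when 'check' is set, each candidate is also vetted through
 * sctp_is_vtag_good() before it is accepted.
 */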
874 uint32_t
875 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
876 {
877 	uint32_t x;
878 	struct timeval now;
879 
880 	if (check) {
881 		(void)SCTP_GETTIME_TIMEVAL(&now);
882 	}
883 	for (;;) {
884 		x = sctp_select_initial_TSN(&inp->sctp_ep);
885 		if (x == 0) {
886 			/* we never use 0 */
887 			continue;
888 		}
889 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
890 			break;
891 		}
892 	}
893 	return (x);
894 }
895 
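/*
 * Initialize a freshly allocated association: copy the endpoint defaults
 * (timers, supported extensions, congestion control and stream scheduling
 * modules), pick the verification tags and initial TSN, and allocate the
 * outgoing stream array and the TSN mapping arrays.  Returns 0 on success
 * or ENOMEM if an allocation fails.
 */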
896 int
897 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
898     uint32_t override_tag, uint32_t vrf_id)
899 {
900 	struct sctp_association *asoc;
901 
902 	/*
903 	 * Anything set to zero is taken care of by the allocation routine's
904 	 * bzero
905 	 */
906 
907 	/*
908 	 * Up front, select what scoping to apply on addresses I tell my
909 	 * peer. Not sure what to do with these right now; we will need to
910 	 * come up with a way to set them. We may need to pass them through
911 	 * from the caller in the sctp_aloc_assoc() function.
912 	 */
913 	int i;
914 
915 #if defined(SCTP_DETAILED_STR_STATS)
916 	int j;
917 
918 #endif
919 
920 	asoc = &stcb->asoc;
921 	/* init all variables to a known value. */
922 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
923 	asoc->max_burst = inp->sctp_ep.max_burst;
924 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
925 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
926 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
927 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
928 	asoc->ecn_supported = inp->ecn_supported;
929 	asoc->prsctp_supported = inp->prsctp_supported;
930 	asoc->auth_supported = inp->auth_supported;
931 	asoc->asconf_supported = inp->asconf_supported;
932 	asoc->reconfig_supported = inp->reconfig_supported;
933 	asoc->nrsack_supported = inp->nrsack_supported;
934 	asoc->pktdrop_supported = inp->pktdrop_supported;
935 	asoc->sctp_cmt_pf = (uint8_t) 0;
936 	asoc->sctp_frag_point = inp->sctp_frag_point;
937 	asoc->sctp_features = inp->sctp_features;
938 	asoc->default_dscp = inp->sctp_ep.default_dscp;
939 	asoc->max_cwnd = inp->max_cwnd;
940 #ifdef INET6
941 	if (inp->sctp_ep.default_flowlabel) {
942 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
943 	} else {
944 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
945 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
946 			asoc->default_flowlabel &= 0x000fffff;
947 			asoc->default_flowlabel |= 0x80000000;
948 		} else {
949 			asoc->default_flowlabel = 0;
950 		}
951 	}
952 #endif
953 	asoc->sb_send_resv = 0;
954 	if (override_tag) {
955 		asoc->my_vtag = override_tag;
956 	} else {
957 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
958 	}
959 	/* Get the nonce tags */
960 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
961 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
962 	asoc->vrf_id = vrf_id;
963 
964 #ifdef SCTP_ASOCLOG_OF_TSNS
965 	asoc->tsn_in_at = 0;
966 	asoc->tsn_out_at = 0;
967 	asoc->tsn_in_wrapped = 0;
968 	asoc->tsn_out_wrapped = 0;
969 	asoc->cumack_log_at = 0;
970 	asoc->cumack_log_atsnt = 0;
971 #endif
972 #ifdef SCTP_FS_SPEC_LOG
973 	asoc->fs_index = 0;
974 #endif
975 	asoc->refcnt = 0;
976 	asoc->assoc_up_sent = 0;
977 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
978 	    sctp_select_initial_TSN(&inp->sctp_ep);
979 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
980 	/* we are optimistic here */
981 	asoc->peer_supports_nat = 0;
982 	asoc->sent_queue_retran_cnt = 0;
983 
984 	/* for CMT */
985 	asoc->last_net_cmt_send_started = NULL;
986 
987 	/* This will need to be adjusted */
988 	asoc->last_acked_seq = asoc->init_seq_number - 1;
989 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
990 	asoc->asconf_seq_in = asoc->last_acked_seq;
991 
992 	/* here we are different, we hold the next one we expect */
993 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
994 
995 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
996 	asoc->initial_rto = inp->sctp_ep.initial_rto;
997 
998 	asoc->max_init_times = inp->sctp_ep.max_init_times;
999 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1000 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1001 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1002 	asoc->free_chunk_cnt = 0;
1003 
1004 	asoc->iam_blocking = 0;
1005 	asoc->context = inp->sctp_context;
1006 	asoc->local_strreset_support = inp->local_strreset_support;
1007 	asoc->def_send = inp->def_send;
1008 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1009 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1010 	asoc->pr_sctp_cnt = 0;
1011 	asoc->total_output_queue_size = 0;
1012 
1013 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1014 		asoc->scope.ipv6_addr_legal = 1;
1015 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1016 			asoc->scope.ipv4_addr_legal = 1;
1017 		} else {
1018 			asoc->scope.ipv4_addr_legal = 0;
1019 		}
1020 	} else {
1021 		asoc->scope.ipv6_addr_legal = 0;
1022 		asoc->scope.ipv4_addr_legal = 1;
1023 	}
1024 
1025 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1026 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1027 
1028 	asoc->smallest_mtu = inp->sctp_frag_point;
1029 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1030 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1031 
1032 	asoc->locked_on_sending = NULL;
1033 	asoc->stream_locked_on = 0;
1034 	asoc->ecn_echo_cnt_onq = 0;
1035 	asoc->stream_locked = 0;
1036 
1037 	asoc->send_sack = 1;
1038 
1039 	LIST_INIT(&asoc->sctp_restricted_addrs);
1040 
1041 	TAILQ_INIT(&asoc->nets);
1042 	TAILQ_INIT(&asoc->pending_reply_queue);
1043 	TAILQ_INIT(&asoc->asconf_ack_sent);
1044 	/* Set up to fill the hb random cache at first HB */
1045 	asoc->hb_random_idx = 4;
1046 
1047 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1048 
1049 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1050 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1051 
1052 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1053 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1054 
1055 	/*
1056 	 * Now the stream parameters; here we allocate space for all streams
1057 	 * that we request by default.
1058 	 */
1059 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1060 	    inp->sctp_ep.pre_open_stream_count;
1061 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1062 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1063 	    SCTP_M_STRMO);
1064 	if (asoc->strmout == NULL) {
1065 		/* big trouble no memory */
1066 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1067 		return (ENOMEM);
1068 	}
1069 	for (i = 0; i < asoc->streamoutcnt; i++) {
1070 		/*
1071 		 * The inbound side must be set to 0xffff. Also NOTE: when we
1072 		 * get the INIT-ACK back (for the INIT sender) we MUST reduce
1073 		 * the count (streamoutcnt), but first check if we sent to any
1074 		 * of the upper streams that were dropped (if some were). Those
1075 		 * that were dropped must be reported to the upper layer as
1076 		 * having failed to send.
1077 		 */
1078 		asoc->strmout[i].next_sequence_send = 0x0;
1079 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1080 		asoc->strmout[i].chunks_on_queues = 0;
1081 #if defined(SCTP_DETAILED_STR_STATS)
1082 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1083 			asoc->strmout[i].abandoned_sent[j] = 0;
1084 			asoc->strmout[i].abandoned_unsent[j] = 0;
1085 		}
1086 #else
1087 		asoc->strmout[i].abandoned_sent[0] = 0;
1088 		asoc->strmout[i].abandoned_unsent[0] = 0;
1089 #endif
1090 		asoc->strmout[i].stream_no = i;
1091 		asoc->strmout[i].last_msg_incomplete = 0;
1092 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1093 	}
1094 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1095 
1096 	/* Now the mapping array */
1097 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1098 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1099 	    SCTP_M_MAP);
1100 	if (asoc->mapping_array == NULL) {
1101 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1102 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1103 		return (ENOMEM);
1104 	}
1105 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1106 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1107 	    SCTP_M_MAP);
1108 	if (asoc->nr_mapping_array == NULL) {
1109 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1110 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1111 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1112 		return (ENOMEM);
1113 	}
1114 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1115 
1116 	/* Now the init of the other outqueues */
1117 	TAILQ_INIT(&asoc->free_chunks);
1118 	TAILQ_INIT(&asoc->control_send_queue);
1119 	TAILQ_INIT(&asoc->asconf_send_queue);
1120 	TAILQ_INIT(&asoc->send_queue);
1121 	TAILQ_INIT(&asoc->sent_queue);
1122 	TAILQ_INIT(&asoc->reasmqueue);
1123 	TAILQ_INIT(&asoc->resetHead);
1124 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1125 	TAILQ_INIT(&asoc->asconf_queue);
1126 	/* authentication fields */
1127 	asoc->authinfo.random = NULL;
1128 	asoc->authinfo.active_keyid = 0;
1129 	asoc->authinfo.assoc_key = NULL;
1130 	asoc->authinfo.assoc_keyid = 0;
1131 	asoc->authinfo.recv_key = NULL;
1132 	asoc->authinfo.recv_keyid = 0;
1133 	LIST_INIT(&asoc->shared_keys);
1134 	asoc->marked_retrans = 0;
1135 	asoc->port = inp->sctp_ep.port;
1136 	asoc->timoinit = 0;
1137 	asoc->timodata = 0;
1138 	asoc->timosack = 0;
1139 	asoc->timoshutdown = 0;
1140 	asoc->timoheartbeat = 0;
1141 	asoc->timocookie = 0;
1142 	asoc->timoshutdownack = 0;
1143 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1144 	asoc->discontinuity_time = asoc->start_time;
1145 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1146 		asoc->abandoned_unsent[i] = 0;
1147 		asoc->abandoned_sent[i] = 0;
1148 	}
1149 	/*
1150 	 * sa_ignore MEMLEAK (memory is put in the assoc mapping array and
1151 	 * freed later when the association is freed).
1152 	 */
1153 	return (0);
1154 }
1155 
1156 void
1157 sctp_print_mapping_array(struct sctp_association *asoc)
1158 {
1159 	unsigned int i, limit;
1160 
1161 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1162 	    asoc->mapping_array_size,
1163 	    asoc->mapping_array_base_tsn,
1164 	    asoc->cumulative_tsn,
1165 	    asoc->highest_tsn_inside_map,
1166 	    asoc->highest_tsn_inside_nr_map);
1167 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1168 		if (asoc->mapping_array[limit - 1] != 0) {
1169 			break;
1170 		}
1171 	}
1172 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1173 	for (i = 0; i < limit; i++) {
1174 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1175 	}
1176 	if (limit % 16)
1177 		SCTP_PRINTF("\n");
1178 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1179 		if (asoc->nr_mapping_array[limit - 1]) {
1180 			break;
1181 		}
1182 	}
1183 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1184 	for (i = 0; i < limit; i++) {
1185 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1186 	}
1187 	if (limit % 16)
1188 		SCTP_PRINTF("\n");
1189 }
1190 
1191 int
1192 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1193 {
1194 	/* mapping array needs to grow */
1195 	uint8_t *new_array1, *new_array2;
1196 	uint32_t new_size;
1197 
1198 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1199 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1200 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1201 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1202 		/* can't get more, forget it */
1203 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1204 		if (new_array1) {
1205 			SCTP_FREE(new_array1, SCTP_M_MAP);
1206 		}
1207 		if (new_array2) {
1208 			SCTP_FREE(new_array2, SCTP_M_MAP);
1209 		}
1210 		return (-1);
1211 	}
1212 	memset(new_array1, 0, new_size);
1213 	memset(new_array2, 0, new_size);
1214 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1215 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1216 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1217 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1218 	asoc->mapping_array = new_array1;
1219 	asoc->nr_mapping_array = new_array2;
1220 	asoc->mapping_array_size = new_size;
1221 	return (0);
1222 }
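/*
 * Note on the growth arithmetic above: 'needed' is a count of TSNs, so it
 * is converted to bytes by rounding up ((needed + 7) / 8) and then padded
 * by SCTP_MAPPING_ARRAY_INCR, presumably so the arrays are not regrown on
 * every call.  The renegable and non-renegable arrays are always kept the
 * same size.
 */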
1223 
1224 
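/*
 * Walk every endpoint/association matching the iterator's pcb flags,
 * features and association state, invoking the per-endpoint and
 * per-association callbacks.  The INP_INFO read lock and the iterator lock
 * are held throughout; after every SCTP_ITERATOR_MAX_AT_ONCE associations
 * the locks are briefly dropped so that other threads can make progress.
 */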
1225 static void
1226 sctp_iterator_work(struct sctp_iterator *it)
1227 {
1228 	int iteration_count = 0;
1229 	int inp_skip = 0;
1230 	int first_in = 1;
1231 	struct sctp_inpcb *tinp;
1232 
1233 	SCTP_INP_INFO_RLOCK();
1234 	SCTP_ITERATOR_LOCK();
1235 	if (it->inp) {
1236 		SCTP_INP_RLOCK(it->inp);
1237 		SCTP_INP_DECR_REF(it->inp);
1238 	}
1239 	if (it->inp == NULL) {
1240 		/* iterator is complete */
1241 done_with_iterator:
1242 		SCTP_ITERATOR_UNLOCK();
1243 		SCTP_INP_INFO_RUNLOCK();
1244 		if (it->function_atend != NULL) {
1245 			(*it->function_atend) (it->pointer, it->val);
1246 		}
1247 		SCTP_FREE(it, SCTP_M_ITER);
1248 		return;
1249 	}
1250 select_a_new_ep:
1251 	if (first_in) {
1252 		first_in = 0;
1253 	} else {
1254 		SCTP_INP_RLOCK(it->inp);
1255 	}
1256 	while (((it->pcb_flags) &&
1257 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1258 	    ((it->pcb_features) &&
1259 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1260 		/* endpoint flags or features don't match, so keep looking */
1261 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1262 			SCTP_INP_RUNLOCK(it->inp);
1263 			goto done_with_iterator;
1264 		}
1265 		tinp = it->inp;
1266 		it->inp = LIST_NEXT(it->inp, sctp_list);
1267 		SCTP_INP_RUNLOCK(tinp);
1268 		if (it->inp == NULL) {
1269 			goto done_with_iterator;
1270 		}
1271 		SCTP_INP_RLOCK(it->inp);
1272 	}
1273 	/* now go through each assoc which is in the desired state */
1274 	if (it->done_current_ep == 0) {
1275 		if (it->function_inp != NULL)
1276 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1277 		it->done_current_ep = 1;
1278 	}
1279 	if (it->stcb == NULL) {
1280 		/* run the per instance function */
1281 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1282 	}
1283 	if ((inp_skip) || it->stcb == NULL) {
1284 		if (it->function_inp_end != NULL) {
1285 			inp_skip = (*it->function_inp_end) (it->inp,
1286 			    it->pointer,
1287 			    it->val);
1288 		}
1289 		SCTP_INP_RUNLOCK(it->inp);
1290 		goto no_stcb;
1291 	}
1292 	while (it->stcb) {
1293 		SCTP_TCB_LOCK(it->stcb);
1294 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1295 			/* not in the right state... keep looking */
1296 			SCTP_TCB_UNLOCK(it->stcb);
1297 			goto next_assoc;
1298 		}
1299 		/* see if we have limited out the iterator loop */
1300 		iteration_count++;
1301 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1302 			/* Pause to let others grab the lock */
1303 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1304 			SCTP_TCB_UNLOCK(it->stcb);
1305 			SCTP_INP_INCR_REF(it->inp);
1306 			SCTP_INP_RUNLOCK(it->inp);
1307 			SCTP_ITERATOR_UNLOCK();
1308 			SCTP_INP_INFO_RUNLOCK();
1309 			SCTP_INP_INFO_RLOCK();
1310 			SCTP_ITERATOR_LOCK();
1311 			if (sctp_it_ctl.iterator_flags) {
1312 				/* We won't be staying here */
1313 				SCTP_INP_DECR_REF(it->inp);
1314 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1315 				if (sctp_it_ctl.iterator_flags &
1316 				    SCTP_ITERATOR_STOP_CUR_IT) {
1317 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1318 					goto done_with_iterator;
1319 				}
1320 				if (sctp_it_ctl.iterator_flags &
1321 				    SCTP_ITERATOR_STOP_CUR_INP) {
1322 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1323 					goto no_stcb;
1324 				}
1325 				/* If we reach here huh? */
1326 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1327 				    sctp_it_ctl.iterator_flags);
1328 				sctp_it_ctl.iterator_flags = 0;
1329 			}
1330 			SCTP_INP_RLOCK(it->inp);
1331 			SCTP_INP_DECR_REF(it->inp);
1332 			SCTP_TCB_LOCK(it->stcb);
1333 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1334 			iteration_count = 0;
1335 		}
1336 		/* run function on this one */
1337 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1338 
1339 		/*
1340 		 * we lie here, it really needs to have its own type but
1341 		 * first I must verify that this won't affect things :-0
1342 		 */
1343 		if (it->no_chunk_output == 0)
1344 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1345 
1346 		SCTP_TCB_UNLOCK(it->stcb);
1347 next_assoc:
1348 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1349 		if (it->stcb == NULL) {
1350 			/* Run last function */
1351 			if (it->function_inp_end != NULL) {
1352 				inp_skip = (*it->function_inp_end) (it->inp,
1353 				    it->pointer,
1354 				    it->val);
1355 			}
1356 		}
1357 	}
1358 	SCTP_INP_RUNLOCK(it->inp);
1359 no_stcb:
1360 	/* done with all assocs on this endpoint, move on to next endpoint */
1361 	it->done_current_ep = 0;
1362 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1363 		it->inp = NULL;
1364 	} else {
1365 		it->inp = LIST_NEXT(it->inp, sctp_list);
1366 	}
1367 	if (it->inp == NULL) {
1368 		goto done_with_iterator;
1369 	}
1370 	goto select_a_new_ep;
1371 }
1372 
1373 void
1374 sctp_iterator_worker(void)
1375 {
1376 	struct sctp_iterator *it, *nit;
1377 
1378 	/* This function is called with the WQ lock in place */
1379 
1380 	sctp_it_ctl.iterator_running = 1;
1381 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1382 		sctp_it_ctl.cur_it = it;
1383 		/* now let's work on this one */
1384 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1385 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1386 		CURVNET_SET(it->vn);
1387 		sctp_iterator_work(it);
1388 		sctp_it_ctl.cur_it = NULL;
1389 		CURVNET_RESTORE();
1390 		SCTP_IPI_ITERATOR_WQ_LOCK();
1391 		/* sa_ignore FREED_MEMORY */
1392 	}
1393 	sctp_it_ctl.iterator_running = 0;
1394 	return;
1395 }
1396 
1397 
1398 static void
1399 sctp_handle_addr_wq(void)
1400 {
1401 	/* deal with the ADDR wq from the rtsock calls */
1402 	struct sctp_laddr *wi, *nwi;
1403 	struct sctp_asconf_iterator *asc;
1404 
1405 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1406 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1407 	if (asc == NULL) {
1408 		/* Try later, no memory */
1409 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1410 		    (struct sctp_inpcb *)NULL,
1411 		    (struct sctp_tcb *)NULL,
1412 		    (struct sctp_nets *)NULL);
1413 		return;
1414 	}
1415 	LIST_INIT(&asc->list_of_work);
1416 	asc->cnt = 0;
1417 
1418 	SCTP_WQ_ADDR_LOCK();
1419 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1420 		LIST_REMOVE(wi, sctp_nxt_addr);
1421 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1422 		asc->cnt++;
1423 	}
1424 	SCTP_WQ_ADDR_UNLOCK();
1425 
1426 	if (asc->cnt == 0) {
1427 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1428 	} else {
1429 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1430 		    sctp_asconf_iterator_stcb,
1431 		    NULL,	/* No ep end for boundall */
1432 		    SCTP_PCB_FLAGS_BOUNDALL,
1433 		    SCTP_PCB_ANY_FEATURES,
1434 		    SCTP_ASOC_ANY_STATE,
1435 		    (void *)asc, 0,
1436 		    sctp_asconf_iterator_end, NULL, 0);
1437 	}
1438 }
1439 
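/*
 * Common callout handler for every SCTP timer type.  After validating the
 * timer and taking references on the endpoint and association, it locks
 * the TCB, dispatches on tmr->type, bumps the matching statistics counter
 * and, for most timer types, pushes any pending output with
 * sctp_chunk_output().
 */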
1440 void
1441 sctp_timeout_handler(void *t)
1442 {
1443 	struct sctp_inpcb *inp;
1444 	struct sctp_tcb *stcb;
1445 	struct sctp_nets *net;
1446 	struct sctp_timer *tmr;
1447 
1448 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1449 	struct socket *so;
1450 
1451 #endif
1452 	int did_output;
1453 
1454 	tmr = (struct sctp_timer *)t;
1455 	inp = (struct sctp_inpcb *)tmr->ep;
1456 	stcb = (struct sctp_tcb *)tmr->tcb;
1457 	net = (struct sctp_nets *)tmr->net;
1458 	CURVNET_SET((struct vnet *)tmr->vnet);
1459 	did_output = 1;
1460 
1461 #ifdef SCTP_AUDITING_ENABLED
1462 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1463 	sctp_auditing(3, inp, stcb, net);
1464 #endif
1465 
1466 	/* sanity checks... */
1467 	if (tmr->self != (void *)tmr) {
1468 		/*
1469 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1470 		 * (void *)tmr);
1471 		 */
1472 		CURVNET_RESTORE();
1473 		return;
1474 	}
1475 	tmr->stopped_from = 0xa001;
1476 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1477 		/*
1478 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1479 		 * tmr->type);
1480 		 */
1481 		CURVNET_RESTORE();
1482 		return;
1483 	}
1484 	tmr->stopped_from = 0xa002;
1485 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1486 		CURVNET_RESTORE();
1487 		return;
1488 	}
1489 	/* if this is an iterator timeout, get the struct and clear inp */
1490 	tmr->stopped_from = 0xa003;
1491 	if (inp) {
1492 		SCTP_INP_INCR_REF(inp);
1493 		if ((inp->sctp_socket == NULL) &&
1494 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1495 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1496 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1497 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1498 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1499 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1500 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1501 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1502 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1503 		    ) {
1504 			SCTP_INP_DECR_REF(inp);
1505 			CURVNET_RESTORE();
1506 			return;
1507 		}
1508 	}
1509 	tmr->stopped_from = 0xa004;
1510 	if (stcb) {
1511 		atomic_add_int(&stcb->asoc.refcnt, 1);
1512 		if (stcb->asoc.state == 0) {
1513 			atomic_add_int(&stcb->asoc.refcnt, -1);
1514 			if (inp) {
1515 				SCTP_INP_DECR_REF(inp);
1516 			}
1517 			CURVNET_RESTORE();
1518 			return;
1519 		}
1520 	}
1521 	tmr->stopped_from = 0xa005;
1522 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
1523 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1524 		if (inp) {
1525 			SCTP_INP_DECR_REF(inp);
1526 		}
1527 		if (stcb) {
1528 			atomic_add_int(&stcb->asoc.refcnt, -1);
1529 		}
1530 		CURVNET_RESTORE();
1531 		return;
1532 	}
1533 	tmr->stopped_from = 0xa006;
1534 
1535 	if (stcb) {
1536 		SCTP_TCB_LOCK(stcb);
1537 		atomic_add_int(&stcb->asoc.refcnt, -1);
1538 		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
1539 		    ((stcb->asoc.state == 0) ||
1540 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1541 			SCTP_TCB_UNLOCK(stcb);
1542 			if (inp) {
1543 				SCTP_INP_DECR_REF(inp);
1544 			}
1545 			CURVNET_RESTORE();
1546 			return;
1547 		}
1548 	}
1549 	/* record in stopped_from what timeout occurred */
1550 	tmr->stopped_from = tmr->type;
1551 
1552 	/* mark as being serviced now */
1553 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1554 		/*
1555 		 * Callout has been rescheduled.
1556 		 */
1557 		goto get_out;
1558 	}
1559 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1560 		/*
1561 		 * Not active, so no action.
1562 		 */
1563 		goto get_out;
1564 	}
1565 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1566 
1567 	/* call the handler for the appropriate timer type */
1568 	switch (tmr->type) {
1569 	case SCTP_TIMER_TYPE_ZERO_COPY:
1570 		if (inp == NULL) {
1571 			break;
1572 		}
1573 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1574 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1575 		}
1576 		break;
1577 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1578 		if (inp == NULL) {
1579 			break;
1580 		}
1581 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1582 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1583 		}
1584 		break;
1585 	case SCTP_TIMER_TYPE_ADDR_WQ:
1586 		sctp_handle_addr_wq();
1587 		break;
1588 	case SCTP_TIMER_TYPE_SEND:
1589 		if ((stcb == NULL) || (inp == NULL)) {
1590 			break;
1591 		}
1592 		SCTP_STAT_INCR(sctps_timodata);
1593 		stcb->asoc.timodata++;
1594 		stcb->asoc.num_send_timers_up--;
1595 		if (stcb->asoc.num_send_timers_up < 0) {
1596 			stcb->asoc.num_send_timers_up = 0;
1597 		}
1598 		SCTP_TCB_LOCK_ASSERT(stcb);
1599 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1600 			/* no need to unlock on tcb, it's gone */
1601 
1602 			goto out_decr;
1603 		}
1604 		SCTP_TCB_LOCK_ASSERT(stcb);
1605 #ifdef SCTP_AUDITING_ENABLED
1606 		sctp_auditing(4, inp, stcb, net);
1607 #endif
1608 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1609 		if ((stcb->asoc.num_send_timers_up == 0) &&
1610 		    (stcb->asoc.sent_queue_cnt > 0)) {
1611 			struct sctp_tmit_chunk *chk;
1612 
1613 			/*
1614 			 * safeguard. If there are some on the sent queue
1615 			 * somewhere but no timers running, something is
1616 			 * wrong... so we start a timer on the first chunk
1617 			 * on the send queue on whatever net it is sent to.
1618 			 */
1619 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1620 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1621 			    chk->whoTo);
1622 		}
1623 		break;
1624 	case SCTP_TIMER_TYPE_INIT:
1625 		if ((stcb == NULL) || (inp == NULL)) {
1626 			break;
1627 		}
1628 		SCTP_STAT_INCR(sctps_timoinit);
1629 		stcb->asoc.timoinit++;
1630 		if (sctp_t1init_timer(inp, stcb, net)) {
1631 			/* no need to unlock on tcb, it's gone */
1632 			goto out_decr;
1633 		}
1634 		/* We do output but not here */
1635 		did_output = 0;
1636 		break;
1637 	case SCTP_TIMER_TYPE_RECV:
1638 		if ((stcb == NULL) || (inp == NULL)) {
1639 			break;
1640 		}
1641 		SCTP_STAT_INCR(sctps_timosack);
1642 		stcb->asoc.timosack++;
1643 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1644 #ifdef SCTP_AUDITING_ENABLED
1645 		sctp_auditing(4, inp, stcb, net);
1646 #endif
1647 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1648 		break;
1649 	case SCTP_TIMER_TYPE_SHUTDOWN:
1650 		if ((stcb == NULL) || (inp == NULL)) {
1651 			break;
1652 		}
1653 		if (sctp_shutdown_timer(inp, stcb, net)) {
1654 			/* no need to unlock on tcb, it's gone */
1655 			goto out_decr;
1656 		}
1657 		SCTP_STAT_INCR(sctps_timoshutdown);
1658 		stcb->asoc.timoshutdown++;
1659 #ifdef SCTP_AUDITING_ENABLED
1660 		sctp_auditing(4, inp, stcb, net);
1661 #endif
1662 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1663 		break;
1664 	case SCTP_TIMER_TYPE_HEARTBEAT:
1665 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1666 			break;
1667 		}
1668 		SCTP_STAT_INCR(sctps_timoheartbeat);
1669 		stcb->asoc.timoheartbeat++;
1670 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1671 			/* no need to unlock on tcb, it's gone */
1672 			goto out_decr;
1673 		}
1674 #ifdef SCTP_AUDITING_ENABLED
1675 		sctp_auditing(4, inp, stcb, net);
1676 #endif
1677 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1678 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1679 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1680 		}
1681 		break;
1682 	case SCTP_TIMER_TYPE_COOKIE:
1683 		if ((stcb == NULL) || (inp == NULL)) {
1684 			break;
1685 		}
1686 		if (sctp_cookie_timer(inp, stcb, net)) {
1687 			/* no need to unlock on tcb, it's gone */
1688 			goto out_decr;
1689 		}
1690 		SCTP_STAT_INCR(sctps_timocookie);
1691 		stcb->asoc.timocookie++;
1692 #ifdef SCTP_AUDITING_ENABLED
1693 		sctp_auditing(4, inp, stcb, net);
1694 #endif
1695 		/*
1696 		 * We consider the T3 and cookie timers pretty much the same
1697 		 * with respect to the "from" value passed to chunk_output.
1698 		 */
1699 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1700 		break;
1701 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1702 		{
1703 			struct timeval tv;
1704 			int i, secret;
1705 
1706 			if (inp == NULL) {
1707 				break;
1708 			}
1709 			SCTP_STAT_INCR(sctps_timosecret);
1710 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1711 			SCTP_INP_WLOCK(inp);
1712 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1713 			inp->sctp_ep.last_secret_number =
1714 			    inp->sctp_ep.current_secret_number;
1715 			inp->sctp_ep.current_secret_number++;
1716 			if (inp->sctp_ep.current_secret_number >=
1717 			    SCTP_HOW_MANY_SECRETS) {
1718 				inp->sctp_ep.current_secret_number = 0;
1719 			}
1720 			secret = (int)inp->sctp_ep.current_secret_number;
1721 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1722 				inp->sctp_ep.secret_key[secret][i] =
1723 				    sctp_select_initial_TSN(&inp->sctp_ep);
1724 			}
1725 			SCTP_INP_WUNLOCK(inp);
1726 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1727 		}
1728 		did_output = 0;
1729 		break;
1730 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1731 		if ((stcb == NULL) || (inp == NULL)) {
1732 			break;
1733 		}
1734 		SCTP_STAT_INCR(sctps_timopathmtu);
1735 		sctp_pathmtu_timer(inp, stcb, net);
1736 		did_output = 0;
1737 		break;
1738 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1739 		if ((stcb == NULL) || (inp == NULL)) {
1740 			break;
1741 		}
1742 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1743 			/* no need to unlock the tcb, it's gone */
1744 			goto out_decr;
1745 		}
1746 		SCTP_STAT_INCR(sctps_timoshutdownack);
1747 		stcb->asoc.timoshutdownack++;
1748 #ifdef SCTP_AUDITING_ENABLED
1749 		sctp_auditing(4, inp, stcb, net);
1750 #endif
1751 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1752 		break;
1753 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1754 		if ((stcb == NULL) || (inp == NULL)) {
1755 			break;
1756 		}
1757 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1758 		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
1759 		/* no need to unlock the tcb, it's gone */
1760 		goto out_decr;
1761 
1762 	case SCTP_TIMER_TYPE_STRRESET:
1763 		if ((stcb == NULL) || (inp == NULL)) {
1764 			break;
1765 		}
1766 		if (sctp_strreset_timer(inp, stcb, net)) {
1767 			/* no need to unlock the tcb, it's gone */
1768 			goto out_decr;
1769 		}
1770 		SCTP_STAT_INCR(sctps_timostrmrst);
1771 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1772 		break;
1773 	case SCTP_TIMER_TYPE_ASCONF:
1774 		if ((stcb == NULL) || (inp == NULL)) {
1775 			break;
1776 		}
1777 		if (sctp_asconf_timer(inp, stcb, net)) {
1778 			/* no need to unlock the tcb, it's gone */
1779 			goto out_decr;
1780 		}
1781 		SCTP_STAT_INCR(sctps_timoasconf);
1782 #ifdef SCTP_AUDITING_ENABLED
1783 		sctp_auditing(4, inp, stcb, net);
1784 #endif
1785 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1786 		break;
1787 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1788 		if ((stcb == NULL) || (inp == NULL)) {
1789 			break;
1790 		}
1791 		sctp_delete_prim_timer(inp, stcb, net);
1792 		SCTP_STAT_INCR(sctps_timodelprim);
1793 		break;
1794 
1795 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1796 		if ((stcb == NULL) || (inp == NULL)) {
1797 			break;
1798 		}
1799 		SCTP_STAT_INCR(sctps_timoautoclose);
1800 		sctp_autoclose_timer(inp, stcb, net);
1801 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1802 		did_output = 0;
1803 		break;
1804 	case SCTP_TIMER_TYPE_ASOCKILL:
1805 		if ((stcb == NULL) || (inp == NULL)) {
1806 			break;
1807 		}
1808 		SCTP_STAT_INCR(sctps_timoassockill);
1809 		/* Can we free it yet? */
1810 		SCTP_INP_DECR_REF(inp);
1811 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1812 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1813 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1814 		so = SCTP_INP_SO(inp);
1815 		atomic_add_int(&stcb->asoc.refcnt, 1);
1816 		SCTP_TCB_UNLOCK(stcb);
1817 		SCTP_SOCKET_LOCK(so, 1);
1818 		SCTP_TCB_LOCK(stcb);
1819 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1820 #endif
1821 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1822 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1823 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1824 		SCTP_SOCKET_UNLOCK(so, 1);
1825 #endif
1826 		/*
1827 		 * sctp_free_assoc() always unlocks (or destroys) the tcb, so
1828 		 * prevent a duplicate unlock or an unlock of a freed mutex.
1829 		 */
1830 		stcb = NULL;
1831 		goto out_no_decr;
1832 	case SCTP_TIMER_TYPE_INPKILL:
1833 		SCTP_STAT_INCR(sctps_timoinpkill);
1834 		if (inp == NULL) {
1835 			break;
1836 		}
1837 		/*
1838 		 * special case, take away our increment since WE are the
1839 		 * killer
1840 		 */
1841 		SCTP_INP_DECR_REF(inp);
1842 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1843 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1844 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1845 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1846 		inp = NULL;
1847 		goto out_no_decr;
1848 	default:
1849 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1850 		    tmr->type);
1851 		break;
1852 	}
1853 #ifdef SCTP_AUDITING_ENABLED
1854 	sctp_audit_log(0xF1, (uint8_t) tmr->type);
1855 	if (inp)
1856 		sctp_auditing(5, inp, stcb, net);
1857 #endif
1858 	if ((did_output) && stcb) {
1859 		/*
1860 		 * Now we need to clean up the control chunk chain if an
1861 		 * ECNE is on it. It must be marked as UNSENT again so the
1862 		 * next call will continue to send it until we get a CWR
1863 		 * to remove it. It is, however, rather unlikely that we
1864 		 * will find an ECN echo on the chain.
1865 		 */
1866 		sctp_fix_ecn_echo(&stcb->asoc);
1867 	}
1868 get_out:
1869 	if (stcb) {
1870 		SCTP_TCB_UNLOCK(stcb);
1871 	}
1872 out_decr:
1873 	if (inp) {
1874 		SCTP_INP_DECR_REF(inp);
1875 	}
1876 out_no_decr:
1877 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
1878 	    tmr->type);
1879 	CURVNET_RESTORE();
1880 }
1881 
1882 void
1883 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1884     struct sctp_nets *net)
1885 {
1886 	uint32_t to_ticks;
1887 	struct sctp_timer *tmr;
1888 
1889 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1890 		return;
1891 
1892 	tmr = NULL;
1893 	if (stcb) {
1894 		SCTP_TCB_LOCK_ASSERT(stcb);
1895 	}
1896 	switch (t_type) {
1897 	case SCTP_TIMER_TYPE_ZERO_COPY:
1898 		tmr = &inp->sctp_ep.zero_copy_timer;
1899 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1900 		break;
1901 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1902 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1903 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1904 		break;
1905 	case SCTP_TIMER_TYPE_ADDR_WQ:
1906 		/* Only 1 tick away :-) */
1907 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1908 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1909 		break;
1910 	case SCTP_TIMER_TYPE_SEND:
1911 		/* Here we use the RTO timer */
1912 		{
1913 			int rto_val;
1914 
1915 			if ((stcb == NULL) || (net == NULL)) {
1916 				return;
1917 			}
1918 			tmr = &net->rxt_timer;
1919 			if (net->RTO == 0) {
1920 				rto_val = stcb->asoc.initial_rto;
1921 			} else {
1922 				rto_val = net->RTO;
1923 			}
1924 			to_ticks = MSEC_TO_TICKS(rto_val);
1925 		}
1926 		break;
1927 	case SCTP_TIMER_TYPE_INIT:
1928 		/*
1929 		 * Here we use the INIT timer default, usually about 1
1930 		 * minute.
1931 		 */
1932 		if ((stcb == NULL) || (net == NULL)) {
1933 			return;
1934 		}
1935 		tmr = &net->rxt_timer;
1936 		if (net->RTO == 0) {
1937 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1938 		} else {
1939 			to_ticks = MSEC_TO_TICKS(net->RTO);
1940 		}
1941 		break;
1942 	case SCTP_TIMER_TYPE_RECV:
1943 		/*
1944 		 * Here we use the delayed-ack timer value from the inp,
1945 		 * usually about 200ms.
1946 		 */
1947 		if (stcb == NULL) {
1948 			return;
1949 		}
1950 		tmr = &stcb->asoc.dack_timer;
1951 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1952 		break;
1953 	case SCTP_TIMER_TYPE_SHUTDOWN:
1954 		/* Here we use the RTO of the destination. */
1955 		if ((stcb == NULL) || (net == NULL)) {
1956 			return;
1957 		}
1958 		if (net->RTO == 0) {
1959 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1960 		} else {
1961 			to_ticks = MSEC_TO_TICKS(net->RTO);
1962 		}
1963 		tmr = &net->rxt_timer;
1964 		break;
1965 	case SCTP_TIMER_TYPE_HEARTBEAT:
1966 		/*
1967 		 * The net is used here so that we can add in the RTO, even
1968 		 * though we use a different timer. We also add the HB delay
1969 		 * PLUS a random jitter.
1970 		 */
1971 		if ((stcb == NULL) || (net == NULL)) {
1972 			return;
1973 		} else {
1974 			uint32_t rndval;
1975 			uint32_t jitter;
1976 
1977 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1978 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1979 				return;
1980 			}
1981 			if (net->RTO == 0) {
1982 				to_ticks = stcb->asoc.initial_rto;
1983 			} else {
1984 				to_ticks = net->RTO;
1985 			}
1986 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1987 			jitter = rndval % to_ticks;
1988 			if (jitter >= (to_ticks >> 1)) {
1989 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1990 			} else {
1991 				to_ticks = to_ticks - jitter;
1992 			}
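			/*
			 * As a worked example (values assumed purely for
			 * illustration): with an RTO of 1000 ms, jitter is
			 * rndval % 1000, so the adjustment above yields a
			 * value in roughly [501, 1499] ms, i.e. about
			 * RTO +/- 50%, before the HB delay is added below.
			 */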
1993 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1994 			    !(net->dest_state & SCTP_ADDR_PF)) {
1995 				to_ticks += net->heart_beat_delay;
1996 			}
1997 			/*
1998 			 * to_ticks currently holds milliseconds; convert it
1999 			 * to ticks.
2000 			 */
2001 			to_ticks = MSEC_TO_TICKS(to_ticks);
2002 			tmr = &net->hb_timer;
2003 		}
2004 		break;
2005 	case SCTP_TIMER_TYPE_COOKIE:
2006 		/*
2007 		 * Here we can use the RTO from the network since one RTT
2008 		 * has completed. If a retransmission happened, we will be
2009 		 * using the initial RTO value.
2010 		 */
2011 		if ((stcb == NULL) || (net == NULL)) {
2012 			return;
2013 		}
2014 		if (net->RTO == 0) {
2015 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2016 		} else {
2017 			to_ticks = MSEC_TO_TICKS(net->RTO);
2018 		}
2019 		tmr = &net->rxt_timer;
2020 		break;
2021 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2022 		/*
2023 		 * Nothing needed but the endpoint here, usually about 60
2024 		 * minutes.
2025 		 */
2026 		tmr = &inp->sctp_ep.signature_change;
2027 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2028 		break;
2029 	case SCTP_TIMER_TYPE_ASOCKILL:
2030 		if (stcb == NULL) {
2031 			return;
2032 		}
2033 		tmr = &stcb->asoc.strreset_timer;
2034 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2035 		break;
2036 	case SCTP_TIMER_TYPE_INPKILL:
2037 		/*
2038 		 * The inp is set up to die. We re-use the signature_change
2039 		 * timer since that has stopped and we are in the GONE
2040 		 * state.
2041 		 */
2042 		tmr = &inp->sctp_ep.signature_change;
2043 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2044 		break;
2045 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2046 		/*
2047 		 * Here we use the value found in the EP for PMTU, usually
2048 		 * about 10 minutes.
2049 		 */
2050 		if ((stcb == NULL) || (net == NULL)) {
2051 			return;
2052 		}
2053 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2054 			return;
2055 		}
2056 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2057 		tmr = &net->pmtu_timer;
2058 		break;
2059 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2060 		/* Here we use the RTO of the destination */
2061 		if ((stcb == NULL) || (net == NULL)) {
2062 			return;
2063 		}
2064 		if (net->RTO == 0) {
2065 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2066 		} else {
2067 			to_ticks = MSEC_TO_TICKS(net->RTO);
2068 		}
2069 		tmr = &net->rxt_timer;
2070 		break;
2071 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2072 		/*
2073 		 * Here we use the endpoint's shutdown guard timer, usually
2074 		 * about 3 minutes.
2075 		 */
2076 		if (stcb == NULL) {
2077 			return;
2078 		}
2079 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2080 		tmr = &stcb->asoc.shut_guard_timer;
2081 		break;
2082 	case SCTP_TIMER_TYPE_STRRESET:
2083 		/*
2084 		 * Here the timer comes from the stcb but its value is from
2085 		 * the net's RTO.
2086 		 */
2087 		if ((stcb == NULL) || (net == NULL)) {
2088 			return;
2089 		}
2090 		if (net->RTO == 0) {
2091 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2092 		} else {
2093 			to_ticks = MSEC_TO_TICKS(net->RTO);
2094 		}
2095 		tmr = &stcb->asoc.strreset_timer;
2096 		break;
2097 	case SCTP_TIMER_TYPE_ASCONF:
2098 		/*
2099 		 * Here the timer comes from the stcb but its value is from
2100 		 * the net's RTO.
2101 		 */
2102 		if ((stcb == NULL) || (net == NULL)) {
2103 			return;
2104 		}
2105 		if (net->RTO == 0) {
2106 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2107 		} else {
2108 			to_ticks = MSEC_TO_TICKS(net->RTO);
2109 		}
2110 		tmr = &stcb->asoc.asconf_timer;
2111 		break;
2112 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2113 		if ((stcb == NULL) || (net != NULL)) {
2114 			return;
2115 		}
2116 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2117 		tmr = &stcb->asoc.delete_prim_timer;
2118 		break;
2119 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2120 		if (stcb == NULL) {
2121 			return;
2122 		}
2123 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2124 			/*
2125 			 * Really an error since stcb is NOT set to
2126 			 * autoclose
2127 			 */
2128 			return;
2129 		}
2130 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2131 		tmr = &stcb->asoc.autoclose_timer;
2132 		break;
2133 	default:
2134 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2135 		    __FUNCTION__, t_type);
2136 		return;
2137 		break;
2138 	}
2139 	if ((to_ticks <= 0) || (tmr == NULL)) {
2140 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2141 		    __FUNCTION__, t_type, to_ticks, (void *)tmr);
2142 		return;
2143 	}
2144 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2145 		/*
2146 		 * We do NOT allow the timer to already be running; if it
2147 		 * is, we leave the current one up unchanged.
2148 		 */
2149 		return;
2150 	}
2151 	/* At this point we can proceed */
2152 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2153 		stcb->asoc.num_send_timers_up++;
2154 	}
2155 	tmr->stopped_from = 0;
2156 	tmr->type = t_type;
2157 	tmr->ep = (void *)inp;
2158 	tmr->tcb = (void *)stcb;
2159 	tmr->net = (void *)net;
2160 	tmr->self = (void *)tmr;
2161 	tmr->vnet = (void *)curvnet;
2162 	tmr->ticks = sctp_get_tick_count();
2163 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2164 	return;
2165 }
2166 
2167 void
2168 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2169     struct sctp_nets *net, uint32_t from)
2170 {
2171 	struct sctp_timer *tmr;
2172 
2173 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2174 	    (inp == NULL))
2175 		return;
2176 
2177 	tmr = NULL;
2178 	if (stcb) {
2179 		SCTP_TCB_LOCK_ASSERT(stcb);
2180 	}
2181 	switch (t_type) {
2182 	case SCTP_TIMER_TYPE_ZERO_COPY:
2183 		tmr = &inp->sctp_ep.zero_copy_timer;
2184 		break;
2185 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2186 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2187 		break;
2188 	case SCTP_TIMER_TYPE_ADDR_WQ:
2189 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2190 		break;
2191 	case SCTP_TIMER_TYPE_SEND:
2192 		if ((stcb == NULL) || (net == NULL)) {
2193 			return;
2194 		}
2195 		tmr = &net->rxt_timer;
2196 		break;
2197 	case SCTP_TIMER_TYPE_INIT:
2198 		if ((stcb == NULL) || (net == NULL)) {
2199 			return;
2200 		}
2201 		tmr = &net->rxt_timer;
2202 		break;
2203 	case SCTP_TIMER_TYPE_RECV:
2204 		if (stcb == NULL) {
2205 			return;
2206 		}
2207 		tmr = &stcb->asoc.dack_timer;
2208 		break;
2209 	case SCTP_TIMER_TYPE_SHUTDOWN:
2210 		if ((stcb == NULL) || (net == NULL)) {
2211 			return;
2212 		}
2213 		tmr = &net->rxt_timer;
2214 		break;
2215 	case SCTP_TIMER_TYPE_HEARTBEAT:
2216 		if ((stcb == NULL) || (net == NULL)) {
2217 			return;
2218 		}
2219 		tmr = &net->hb_timer;
2220 		break;
2221 	case SCTP_TIMER_TYPE_COOKIE:
2222 		if ((stcb == NULL) || (net == NULL)) {
2223 			return;
2224 		}
2225 		tmr = &net->rxt_timer;
2226 		break;
2227 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2228 		/* nothing needed but the endpoint here */
2229 		tmr = &inp->sctp_ep.signature_change;
2230 		/*
2231 		 * We re-use the newcookie timer for the INP kill timer. We
2232 		 * must ensure that we do not kill it by accident.
2233 		 */
2234 		break;
2235 	case SCTP_TIMER_TYPE_ASOCKILL:
2236 		/*
2237 		 * Stop the asoc kill timer.
2238 		 */
2239 		if (stcb == NULL) {
2240 			return;
2241 		}
2242 		tmr = &stcb->asoc.strreset_timer;
2243 		break;
2244 
2245 	case SCTP_TIMER_TYPE_INPKILL:
2246 		/*
2247 		 * The inp is set up to die. We re-use the signature_change
2248 		 * timer since that has stopped and we are in the GONE
2249 		 * state.
2250 		 */
2251 		tmr = &inp->sctp_ep.signature_change;
2252 		break;
2253 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2254 		if ((stcb == NULL) || (net == NULL)) {
2255 			return;
2256 		}
2257 		tmr = &net->pmtu_timer;
2258 		break;
2259 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2260 		if ((stcb == NULL) || (net == NULL)) {
2261 			return;
2262 		}
2263 		tmr = &net->rxt_timer;
2264 		break;
2265 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2266 		if (stcb == NULL) {
2267 			return;
2268 		}
2269 		tmr = &stcb->asoc.shut_guard_timer;
2270 		break;
2271 	case SCTP_TIMER_TYPE_STRRESET:
2272 		if (stcb == NULL) {
2273 			return;
2274 		}
2275 		tmr = &stcb->asoc.strreset_timer;
2276 		break;
2277 	case SCTP_TIMER_TYPE_ASCONF:
2278 		if (stcb == NULL) {
2279 			return;
2280 		}
2281 		tmr = &stcb->asoc.asconf_timer;
2282 		break;
2283 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2284 		if (stcb == NULL) {
2285 			return;
2286 		}
2287 		tmr = &stcb->asoc.delete_prim_timer;
2288 		break;
2289 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2290 		if (stcb == NULL) {
2291 			return;
2292 		}
2293 		tmr = &stcb->asoc.autoclose_timer;
2294 		break;
2295 	default:
2296 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2297 		    __FUNCTION__, t_type);
2298 		break;
2299 	}
2300 	if (tmr == NULL) {
2301 		return;
2302 	}
2303 	if ((tmr->type != t_type) && tmr->type) {
2304 		/*
2305 		 * OK, we have a timer that is under joint use, e.g. the
2306 		 * cookie timer sharing rxt_timer with the SEND timer. We
2307 		 * are therefore NOT running the timer that the caller wants
2308 		 * stopped, so just return.
2309 		 */
2310 		return;
2311 	}
2312 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2313 		stcb->asoc.num_send_timers_up--;
2314 		if (stcb->asoc.num_send_timers_up < 0) {
2315 			stcb->asoc.num_send_timers_up = 0;
2316 		}
2317 	}
2318 	tmr->self = NULL;
2319 	tmr->stopped_from = from;
2320 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2321 	return;
2322 }
2323 
2324 uint32_t
2325 sctp_calculate_len(struct mbuf *m)
2326 {
2327 	uint32_t tlen = 0;
2328 	struct mbuf *at;
2329 
2330 	at = m;
2331 	while (at) {
2332 		tlen += SCTP_BUF_LEN(at);
2333 		at = SCTP_BUF_NEXT(at);
2334 	}
2335 	return (tlen);
2336 }
2337 
2338 void
2339 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2340     struct sctp_association *asoc, uint32_t mtu)
2341 {
2342 	/*
2343 	 * Reset the P-MTU size on this association. This involves changing
2344 	 * the asoc MTU and marking ANY chunk whose size plus overhead is
2345 	 * larger than mtu so that the DF flag may be cleared.
2346 	 */
2347 	struct sctp_tmit_chunk *chk;
2348 	unsigned int eff_mtu, ovh;
2349 
2350 	asoc->smallest_mtu = mtu;
2351 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2352 		ovh = SCTP_MIN_OVERHEAD;
2353 	} else {
2354 		ovh = SCTP_MIN_V4_OVERHEAD;
2355 	}
2356 	eff_mtu = mtu - ovh;
2357 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2358 		if (chk->send_size > eff_mtu) {
2359 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2360 		}
2361 	}
2362 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2363 		if (chk->send_size > eff_mtu) {
2364 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2365 		}
2366 	}
2367 }
2368 
2369 
2370 /*
2371  * Given an association and the starting time of the current RTT period,
2372  * return the RTO in msecs. 'net' should point to the current network.
2373  */
2374 
2375 uint32_t
2376 sctp_calculate_rto(struct sctp_tcb *stcb,
2377     struct sctp_association *asoc,
2378     struct sctp_nets *net,
2379     struct timeval *told,
2380     int safe, int rtt_from_sack)
2381 {
2382 	/*-
2383 	 * given an association and the starting time of the current RTT
2384 	 * period (in 'told'), return the RTO in msecs.
2385 	 */
2386 	int32_t rtt;		/* RTT in ms */
2387 	uint32_t new_rto;
2388 	int first_measure = 0;
2389 	struct timeval now, then, *old;
2390 
2391 	/* Copy it out for sparc64 */
2392 	if (safe == sctp_align_unsafe_makecopy) {
2393 		old = &then;
2394 		memcpy(&then, told, sizeof(struct timeval));
2395 	} else if (safe == sctp_align_safe_nocopy) {
2396 		old = told;
2397 	} else {
2398 		/* error */
2399 		SCTP_PRINTF("Huh, bad rto calc call\n");
2400 		return (0);
2401 	}
2402 	/************************/
2403 	/* 1. calculate new RTT */
2404 	/************************/
2405 	/* get the current time */
2406 	if (stcb->asoc.use_precise_time) {
2407 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2408 	} else {
2409 		(void)SCTP_GETTIME_TIMEVAL(&now);
2410 	}
2411 	timevalsub(&now, old);
2412 	/* store the current RTT in us */
2413 	net->rtt = (uint64_t) 1000000 * (uint64_t) now.tv_sec +
2414 	        (uint64_t) now.tv_usec;
2415 
2416 	/* compute rtt in ms */
2417 	rtt = (int32_t) (net->rtt / 1000);
2418 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2419 		/*
2420 		 * Tell the CC module that a new update has just occurred
2421 		 * from a sack
2422 		 */
2423 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2424 	}
2425 	/*
2426 	 * Do we need to determine the LAN type? We do this only on SACKs,
2427 	 * i.e. RTT determined from data, not non-data (HB/INIT->INITACK).
2428 	 */
2429 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2430 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2431 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2432 			net->lan_type = SCTP_LAN_INTERNET;
2433 		} else {
2434 			net->lan_type = SCTP_LAN_LOCAL;
2435 		}
2436 	}
2437 	/***************************/
2438 	/* 2. update RTTVAR & SRTT */
2439 	/***************************/
2440 	/*-
2441 	 * Compute the scaled average lastsa and the
2442 	 * scaled variance lastsv as described in van Jacobson
2443 	 * Paper "Congestion Avoidance and Control", Annex A.
2444 	 *
2445 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2446 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2447 	 */
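	/*
	 * In unscaled terms this is the familiar update (a sketch, assuming
	 * SCTP_RTT_SHIFT == 3 and SCTP_RTT_VAR_SHIFT == 2, i.e. alpha = 1/8
	 * and beta = 1/4):
	 *
	 *	SRTT   <- SRTT   + (rtt - SRTT) / 8
	 *	RTTVAR <- RTTVAR + (|rtt - SRTT| - RTTVAR) / 4
	 *
	 * where lastsa holds SRTT * 8 and lastsv holds RTTVAR * 4 so that
	 * the code below stays in integer arithmetic.
	 */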
2448 	if (net->RTO_measured) {
2449 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2450 		net->lastsa += rtt;
2451 		if (rtt < 0) {
2452 			rtt = -rtt;
2453 		}
2454 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2455 		net->lastsv += rtt;
2456 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2457 			rto_logging(net, SCTP_LOG_RTTVAR);
2458 		}
2459 	} else {
2460 		/* First RTO measurement */
2461 		net->RTO_measured = 1;
2462 		first_measure = 1;
2463 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2464 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2465 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2466 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2467 		}
2468 	}
2469 	if (net->lastsv == 0) {
2470 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2471 	}
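	/*
	 * Under the same scaling assumption this is the familiar
	 * RTO = SRTT + 4 * RTTVAR form (cf. RFC 4960, Section 6.3.1).
	 */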
2472 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2473 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2474 	    (stcb->asoc.sat_network_lockout == 0)) {
2475 		stcb->asoc.sat_network = 1;
2476 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2477 		stcb->asoc.sat_network = 0;
2478 		stcb->asoc.sat_network_lockout = 1;
2479 	}
2480 	/* bound it, per C6/C7 in Section 5.3.1 */
2481 	if (new_rto < stcb->asoc.minrto) {
2482 		new_rto = stcb->asoc.minrto;
2483 	}
2484 	if (new_rto > stcb->asoc.maxrto) {
2485 		new_rto = stcb->asoc.maxrto;
2486 	}
2487 	/* we are now returning the RTO */
2488 	return (new_rto);
2489 }
2490 
2491 /*
2492  * return a pointer to a contiguous piece of data from the given mbuf chain
2493  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2494  * one mbuf, a copy is made into 'in_ptr'; the caller must ensure that the
2495  * buffer is >= 'len' bytes. Returns NULL if 'len' bytes are not in the chain.
2496  */
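/*
 * For example (a sketch of typical use): pulling a chunk header that may
 * straddle mbufs into a caller-supplied buffer,
 *
 *	struct sctp_chunkhdr buf, *ch;
 *
 *	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
 *	    sizeof(struct sctp_chunkhdr), (uint8_t *)&buf);
 *
 * after which 'ch' points either into the mbuf itself or at 'buf'.
 */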
2497 caddr_t
2498 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2499 {
2500 	uint32_t count;
2501 	uint8_t *ptr;
2502 
2503 	ptr = in_ptr;
2504 	if ((off < 0) || (len <= 0))
2505 		return (NULL);
2506 
2507 	/* find the desired start location */
2508 	while ((m != NULL) && (off > 0)) {
2509 		if (off < SCTP_BUF_LEN(m))
2510 			break;
2511 		off -= SCTP_BUF_LEN(m);
2512 		m = SCTP_BUF_NEXT(m);
2513 	}
2514 	if (m == NULL)
2515 		return (NULL);
2516 
2517 	/* is the current mbuf large enough (eg. contiguous)? */
2518 	/* is the current mbuf large enough (i.e. contiguous)? */
2519 		return (mtod(m, caddr_t)+off);
2520 	} else {
2521 		/* else, it spans more than one mbuf, so save a temp copy... */
2522 		while ((m != NULL) && (len > 0)) {
2523 			count = min(SCTP_BUF_LEN(m) - off, len);
2524 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2525 			len -= count;
2526 			ptr += count;
2527 			off = 0;
2528 			m = SCTP_BUF_NEXT(m);
2529 		}
2530 		if ((m == NULL) && (len > 0))
2531 			return (NULL);
2532 		else
2533 			return ((caddr_t)in_ptr);
2534 	}
2535 }
2536 
2537 
2538 
2539 struct sctp_paramhdr *
2540 sctp_get_next_param(struct mbuf *m,
2541     int offset,
2542     struct sctp_paramhdr *pull,
2543     int pull_limit)
2544 {
2545 	/* This just provides a typed signature to Peter's Pull routine */
2546 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2547 	    (uint8_t *) pull));
2548 }
2549 
2550 
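/*
 * Append 'padlen' (at most 3) zero bytes to mbuf 'm', growing the chain if
 * there is no trailing space.  SCTP chunks are padded to a 4-byte boundary,
 * so e.g. a 13-byte chunk needs padlen = (4 - (13 % 4)) % 4 = 3.
 */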
2551 struct mbuf *
2552 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2553 {
2554 	struct mbuf *m_last;
2555 	caddr_t dp;
2556 
2557 	if (padlen > 3) {
2558 		return (NULL);
2559 	}
2560 	if (padlen <= M_TRAILINGSPACE(m)) {
2561 		/*
2562 		 * The easy way. We hope the majority of the time we hit
2563 		 * here :)
2564 		 */
2565 		m_last = m;
2566 	} else {
2567 		/* Hard way we must grow the mbuf chain */
2568 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2569 		if (m_last == NULL) {
2570 			return (NULL);
2571 		}
2572 		SCTP_BUF_LEN(m_last) = 0;
2573 		SCTP_BUF_NEXT(m_last) = NULL;
2574 		SCTP_BUF_NEXT(m) = m_last;
2575 	}
2576 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2577 	SCTP_BUF_LEN(m_last) += padlen;
2578 	memset(dp, 0, padlen);
2579 	return (m_last);
2580 }
2581 
2582 struct mbuf *
2583 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2584 {
2585 	/* find the last mbuf in chain and pad it */
2586 	struct mbuf *m_at;
2587 
2588 	if (last_mbuf != NULL) {
2589 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2590 	} else {
2591 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2592 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2593 				return (sctp_add_pad_tombuf(m_at, padval));
2594 			}
2595 		}
2596 	}
2597 	return (NULL);
2598 }
2599 
2600 static void
2601 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2602     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2603 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2604     SCTP_UNUSED
2605 #endif
2606 )
2607 {
2608 	struct mbuf *m_notify;
2609 	struct sctp_assoc_change *sac;
2610 	struct sctp_queued_to_read *control;
2611 	size_t notif_len, abort_len;
2612 	unsigned int i;
2613 
2614 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2615 	struct socket *so;
2616 
2617 #endif
2618 
2619 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2620 		notif_len = sizeof(struct sctp_assoc_change);
2621 		if (abort != NULL) {
2622 			abort_len = ntohs(abort->ch.chunk_length);
2623 		} else {
2624 			abort_len = 0;
2625 		}
2626 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2627 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2628 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2629 			notif_len += abort_len;
2630 		}
2631 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2632 		if (m_notify == NULL) {
2633 			/* Retry with smaller value. */
2634 			notif_len = sizeof(struct sctp_assoc_change);
2635 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2636 			if (m_notify == NULL) {
2637 				goto set_error;
2638 			}
2639 		}
2640 		SCTP_BUF_NEXT(m_notify) = NULL;
2641 		sac = mtod(m_notify, struct sctp_assoc_change *);
2642 		memset(sac, 0, notif_len);
2643 		sac->sac_type = SCTP_ASSOC_CHANGE;
2644 		sac->sac_flags = 0;
2645 		sac->sac_length = sizeof(struct sctp_assoc_change);
2646 		sac->sac_state = state;
2647 		sac->sac_error = error;
2648 		/* XXX verify these stream counts */
2649 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2650 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2651 		sac->sac_assoc_id = sctp_get_associd(stcb);
2652 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2653 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2654 				i = 0;
2655 				if (stcb->asoc.prsctp_supported == 1) {
2656 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2657 				}
2658 				if (stcb->asoc.auth_supported == 1) {
2659 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2660 				}
2661 				if (stcb->asoc.asconf_supported == 1) {
2662 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2663 				}
2664 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2665 				if (stcb->asoc.reconfig_supported == 1) {
2666 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2667 				}
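				/*
				 * Each supported feature contributed one
				 * byte of sac_info above, so grow sac_length
				 * by the number of entries written.
				 */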
2668 				sac->sac_length += i;
2669 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2670 				memcpy(sac->sac_info, abort, abort_len);
2671 				sac->sac_length += abort_len;
2672 			}
2673 		}
2674 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2675 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2676 		    0, 0, stcb->asoc.context, 0, 0, 0,
2677 		    m_notify);
2678 		if (control != NULL) {
2679 			control->length = SCTP_BUF_LEN(m_notify);
2680 			/* not that we need this */
2681 			control->tail_mbuf = m_notify;
2682 			control->spec_flags = M_NOTIFICATION;
2683 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2684 			    control,
2685 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2686 			    so_locked);
2687 		} else {
2688 			sctp_m_freem(m_notify);
2689 		}
2690 	}
2691 	/*
2692 	 * For 1-to-1 style sockets, we send up an error when an ABORT
2693 	 * comes in.
2694 	 */
2695 set_error:
2696 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2697 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2698 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2699 		SOCK_LOCK(stcb->sctp_socket);
2700 		if (from_peer) {
2701 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2702 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2703 				stcb->sctp_socket->so_error = ECONNREFUSED;
2704 			} else {
2705 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2706 				stcb->sctp_socket->so_error = ECONNRESET;
2707 			}
2708 		} else {
2709 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2710 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2711 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2712 				stcb->sctp_socket->so_error = ETIMEDOUT;
2713 			} else {
2714 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2715 				stcb->sctp_socket->so_error = ECONNABORTED;
2716 			}
2717 		}
2718 	}
2719 	/* Wake ANY sleepers */
2720 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2721 	so = SCTP_INP_SO(stcb->sctp_ep);
2722 	if (!so_locked) {
2723 		atomic_add_int(&stcb->asoc.refcnt, 1);
2724 		SCTP_TCB_UNLOCK(stcb);
2725 		SCTP_SOCKET_LOCK(so, 1);
2726 		SCTP_TCB_LOCK(stcb);
2727 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2728 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2729 			SCTP_SOCKET_UNLOCK(so, 1);
2730 			return;
2731 		}
2732 	}
2733 #endif
2734 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2735 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2736 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2737 		socantrcvmore_locked(stcb->sctp_socket);
2738 	}
2739 	sorwakeup(stcb->sctp_socket);
2740 	sowwakeup(stcb->sctp_socket);
2741 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2742 	if (!so_locked) {
2743 		SCTP_SOCKET_UNLOCK(so, 1);
2744 	}
2745 #endif
2746 }
2747 
2748 static void
2749 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2750     struct sockaddr *sa, uint32_t error, int so_locked
2751 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2752     SCTP_UNUSED
2753 #endif
2754 )
2755 {
2756 	struct mbuf *m_notify;
2757 	struct sctp_paddr_change *spc;
2758 	struct sctp_queued_to_read *control;
2759 
2760 	if ((stcb == NULL) ||
2761 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2762 		/* event not enabled */
2763 		return;
2764 	}
2765 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2766 	if (m_notify == NULL)
2767 		return;
2768 	SCTP_BUF_LEN(m_notify) = 0;
2769 	spc = mtod(m_notify, struct sctp_paddr_change *);
2770 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2771 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2772 	spc->spc_flags = 0;
2773 	spc->spc_length = sizeof(struct sctp_paddr_change);
2774 	switch (sa->sa_family) {
2775 #ifdef INET
2776 	case AF_INET:
2777 #ifdef INET6
2778 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2779 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2780 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2781 		} else {
2782 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2783 		}
2784 #else
2785 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2786 #endif
2787 		break;
2788 #endif
2789 #ifdef INET6
2790 	case AF_INET6:
2791 		{
2792 			struct sockaddr_in6 *sin6;
2793 
2794 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2795 
2796 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2797 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2798 				if (sin6->sin6_scope_id == 0) {
2799 					/* recover scope_id for user */
2800 					(void)sa6_recoverscope(sin6);
2801 				} else {
2802 					/* clear embedded scope_id for user */
2803 					in6_clearscope(&sin6->sin6_addr);
2804 				}
2805 			}
2806 			break;
2807 		}
2808 #endif
2809 	default:
2810 		/* TSNH */
2811 		break;
2812 	}
2813 	spc->spc_state = state;
2814 	spc->spc_error = error;
2815 	spc->spc_assoc_id = sctp_get_associd(stcb);
2816 
2817 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2818 	SCTP_BUF_NEXT(m_notify) = NULL;
2819 
2820 	/* append to socket */
2821 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2822 	    0, 0, stcb->asoc.context, 0, 0, 0,
2823 	    m_notify);
2824 	if (control == NULL) {
2825 		/* no memory */
2826 		sctp_m_freem(m_notify);
2827 		return;
2828 	}
2829 	control->length = SCTP_BUF_LEN(m_notify);
2830 	control->spec_flags = M_NOTIFICATION;
2831 	/* not that we need this */
2832 	control->tail_mbuf = m_notify;
2833 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2834 	    control,
2835 	    &stcb->sctp_socket->so_rcv, 1,
2836 	    SCTP_READ_LOCK_NOT_HELD,
2837 	    so_locked);
2838 }
2839 
2840 
2841 static void
2842 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2843     struct sctp_tmit_chunk *chk, int so_locked
2844 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2845     SCTP_UNUSED
2846 #endif
2847 )
2848 {
2849 	struct mbuf *m_notify;
2850 	struct sctp_send_failed *ssf;
2851 	struct sctp_send_failed_event *ssfe;
2852 	struct sctp_queued_to_read *control;
2853 	int length;
2854 
2855 	if ((stcb == NULL) ||
2856 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2857 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2858 		/* event not enabled */
2859 		return;
2860 	}
2861 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2862 		length = sizeof(struct sctp_send_failed_event);
2863 	} else {
2864 		length = sizeof(struct sctp_send_failed);
2865 	}
2866 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
2867 	if (m_notify == NULL)
2868 		/* no space left */
2869 		return;
2870 	SCTP_BUF_LEN(m_notify) = 0;
2871 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2872 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2873 		memset(ssfe, 0, length);
2874 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2875 		if (sent) {
2876 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2877 		} else {
2878 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2879 		}
2880 		length += chk->send_size;
2881 		length -= sizeof(struct sctp_data_chunk);
2882 		ssfe->ssfe_length = length;
2883 		ssfe->ssfe_error = error;
2884 		/* not exactly what the user sent in, but should be close :) */
2885 		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
2886 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2887 		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
2888 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2889 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2890 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2891 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
2892 	} else {
2893 		ssf = mtod(m_notify, struct sctp_send_failed *);
2894 		memset(ssf, 0, length);
2895 		ssf->ssf_type = SCTP_SEND_FAILED;
2896 		if (sent) {
2897 			ssf->ssf_flags = SCTP_DATA_SENT;
2898 		} else {
2899 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2900 		}
2901 		length += chk->send_size;
2902 		length -= sizeof(struct sctp_data_chunk);
2903 		ssf->ssf_length = length;
2904 		ssf->ssf_error = error;
2905 		/* not exactly what the user sent in, but should be close :) */
2906 		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
2907 		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2908 		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2909 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2910 		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2911 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
2912 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2913 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
2914 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2915 	}
2916 	if (chk->data) {
2917 		/*
2918 		 * trim off the SCTP chunk header (it should be there)
2919 		 */
2920 		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
2921 			m_adj(chk->data, sizeof(struct sctp_data_chunk));
2922 			sctp_mbuf_crush(chk->data);
2923 			chk->send_size -= sizeof(struct sctp_data_chunk);
2924 		}
2925 	}
2926 	SCTP_BUF_NEXT(m_notify) = chk->data;
2927 	/* Steal off the mbuf */
2928 	chk->data = NULL;
2929 	/*
2930 	 * For this case, we check the actual socket buffer: since the assoc
2931 	 * is going away, we don't want to overfill the socket buffer for a
2932 	 * non-reader.
2933 	 */
2934 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
2935 		sctp_m_freem(m_notify);
2936 		return;
2937 	}
2938 	/* append to socket */
2939 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2940 	    0, 0, stcb->asoc.context, 0, 0, 0,
2941 	    m_notify);
2942 	if (control == NULL) {
2943 		/* no memory */
2944 		sctp_m_freem(m_notify);
2945 		return;
2946 	}
2947 	control->spec_flags = M_NOTIFICATION;
2948 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2949 	    control,
2950 	    &stcb->sctp_socket->so_rcv, 1,
2951 	    SCTP_READ_LOCK_NOT_HELD,
2952 	    so_locked);
2953 }
2954 
2955 
2956 static void
2957 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
2958     struct sctp_stream_queue_pending *sp, int so_locked
2959 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2960     SCTP_UNUSED
2961 #endif
2962 )
2963 {
2964 	struct mbuf *m_notify;
2965 	struct sctp_send_failed *ssf;
2966 	struct sctp_send_failed_event *ssfe;
2967 	struct sctp_queued_to_read *control;
2968 	int length;
2969 
2970 	if ((stcb == NULL) ||
2971 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2972 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2973 		/* event not enabled */
2974 		return;
2975 	}
2976 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2977 		length = sizeof(struct sctp_send_failed_event);
2978 	} else {
2979 		length = sizeof(struct sctp_send_failed);
2980 	}
2981 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
2982 	if (m_notify == NULL) {
2983 		/* no space left */
2984 		return;
2985 	}
2986 	SCTP_BUF_LEN(m_notify) = 0;
2987 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2988 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2989 		memset(ssfe, 0, length);
2990 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2991 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2992 		length += sp->length;
2993 		ssfe->ssfe_length = length;
2994 		ssfe->ssfe_error = error;
2995 		/* not exactly what the user sent in, but should be close :) */
2996 		ssfe->ssfe_info.snd_sid = sp->stream;
2997 		if (sp->some_taken) {
2998 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
2999 		} else {
3000 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3001 		}
3002 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3003 		ssfe->ssfe_info.snd_context = sp->context;
3004 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3005 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3006 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
3007 	} else {
3008 		ssf = mtod(m_notify, struct sctp_send_failed *);
3009 		memset(ssf, 0, length);
3010 		ssf->ssf_type = SCTP_SEND_FAILED;
3011 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3012 		length += sp->length;
3013 		ssf->ssf_length = length;
3014 		ssf->ssf_error = error;
3015 		/* not exactly what the user sent in, but should be close :) */
3016 		ssf->ssf_info.sinfo_stream = sp->stream;
3017 		ssf->ssf_info.sinfo_ssn = 0;
3018 		if (sp->some_taken) {
3019 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3020 		} else {
3021 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3022 		}
3023 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3024 		ssf->ssf_info.sinfo_context = sp->context;
3025 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3026 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3027 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3028 	}
3029 	SCTP_BUF_NEXT(m_notify) = sp->data;
3030 
3031 	/* Steal off the mbuf */
3032 	sp->data = NULL;
3033 	/*
3034 	 * For this case, we check the actual socket buffer: since the assoc
3035 	 * is going away, we don't want to overfill the socket buffer for a
3036 	 * non-reader.
3037 	 */
3038 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3039 		sctp_m_freem(m_notify);
3040 		return;
3041 	}
3042 	/* append to socket */
3043 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3044 	    0, 0, stcb->asoc.context, 0, 0, 0,
3045 	    m_notify);
3046 	if (control == NULL) {
3047 		/* no memory */
3048 		sctp_m_freem(m_notify);
3049 		return;
3050 	}
3051 	control->spec_flags = M_NOTIFICATION;
3052 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3053 	    control,
3054 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3055 }
3056 
3057 
3058 
3059 static void
3060 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3061 {
3062 	struct mbuf *m_notify;
3063 	struct sctp_adaptation_event *sai;
3064 	struct sctp_queued_to_read *control;
3065 
3066 	if ((stcb == NULL) ||
3067 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3068 		/* event not enabled */
3069 		return;
3070 	}
3071 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3072 	if (m_notify == NULL)
3073 		/* no space left */
3074 		return;
3075 	SCTP_BUF_LEN(m_notify) = 0;
3076 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3077 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3078 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3079 	sai->sai_flags = 0;
3080 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3081 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3082 	sai->sai_assoc_id = sctp_get_associd(stcb);
3083 
3084 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3085 	SCTP_BUF_NEXT(m_notify) = NULL;
3086 
3087 	/* append to socket */
3088 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3089 	    0, 0, stcb->asoc.context, 0, 0, 0,
3090 	    m_notify);
3091 	if (control == NULL) {
3092 		/* no memory */
3093 		sctp_m_freem(m_notify);
3094 		return;
3095 	}
3096 	control->length = SCTP_BUF_LEN(m_notify);
3097 	control->spec_flags = M_NOTIFICATION;
3098 	/* not that we need this */
3099 	control->tail_mbuf = m_notify;
3100 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3101 	    control,
3102 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3103 }
3104 
3105 /* This must always be called with the read-queue LOCKED in the INP */
3106 static void
3107 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3108     uint32_t val, int so_locked
3109 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3110     SCTP_UNUSED
3111 #endif
3112 )
3113 {
3114 	struct mbuf *m_notify;
3115 	struct sctp_pdapi_event *pdapi;
3116 	struct sctp_queued_to_read *control;
3117 	struct sockbuf *sb;
3118 
3119 	if ((stcb == NULL) ||
3120 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3121 		/* event not enabled */
3122 		return;
3123 	}
3124 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3125 		return;
3126 	}
3127 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3128 	if (m_notify == NULL)
3129 		/* no space left */
3130 		return;
3131 	SCTP_BUF_LEN(m_notify) = 0;
3132 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3133 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3134 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3135 	pdapi->pdapi_flags = 0;
3136 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3137 	pdapi->pdapi_indication = error;
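	/*
	 * 'val' packs the stream id in its upper 16 bits and the stream
	 * sequence number in its lower 16 bits.
	 */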
3138 	pdapi->pdapi_stream = (val >> 16);
3139 	pdapi->pdapi_seq = (val & 0x0000ffff);
3140 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3141 
3142 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3143 	SCTP_BUF_NEXT(m_notify) = NULL;
3144 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3145 	    0, 0, stcb->asoc.context, 0, 0, 0,
3146 	    m_notify);
3147 	if (control == NULL) {
3148 		/* no memory */
3149 		sctp_m_freem(m_notify);
3150 		return;
3151 	}
3152 	control->spec_flags = M_NOTIFICATION;
3153 	control->length = SCTP_BUF_LEN(m_notify);
3154 	/* not that we need this */
3155 	control->tail_mbuf = m_notify;
3156 	control->held_length = 0;
3157 	control->length = 0;
3158 	sb = &stcb->sctp_socket->so_rcv;
3159 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3160 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3161 	}
3162 	sctp_sballoc(stcb, sb, m_notify);
3163 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3164 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3165 	}
3166 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3167 	control->end_added = 1;
3168 	if (stcb->asoc.control_pdapi)
3169 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3170 	else {
3171 		/* we really should not see this case */
3172 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3173 	}
3174 	if (stcb->sctp_ep && stcb->sctp_socket) {
3175 		/* This should always be the case */
3176 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3177 		struct socket *so;
3178 
3179 		so = SCTP_INP_SO(stcb->sctp_ep);
3180 		if (!so_locked) {
3181 			atomic_add_int(&stcb->asoc.refcnt, 1);
3182 			SCTP_TCB_UNLOCK(stcb);
3183 			SCTP_SOCKET_LOCK(so, 1);
3184 			SCTP_TCB_LOCK(stcb);
3185 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3186 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3187 				SCTP_SOCKET_UNLOCK(so, 1);
3188 				return;
3189 			}
3190 		}
3191 #endif
3192 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3193 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3194 		if (!so_locked) {
3195 			SCTP_SOCKET_UNLOCK(so, 1);
3196 		}
3197 #endif
3198 	}
3199 }
3200 
3201 static void
3202 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3203 {
3204 	struct mbuf *m_notify;
3205 	struct sctp_shutdown_event *sse;
3206 	struct sctp_queued_to_read *control;
3207 
3208 	/*
3209 	 * For TCP model AND UDP connected sockets, we will send an error up
3210 	 * when a SHUTDOWN completes
3211 	 */
3212 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3213 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3214 		/* mark socket closed for read/write and wakeup! */
3215 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3216 		struct socket *so;
3217 
3218 		so = SCTP_INP_SO(stcb->sctp_ep);
3219 		atomic_add_int(&stcb->asoc.refcnt, 1);
3220 		SCTP_TCB_UNLOCK(stcb);
3221 		SCTP_SOCKET_LOCK(so, 1);
3222 		SCTP_TCB_LOCK(stcb);
3223 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3224 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3225 			SCTP_SOCKET_UNLOCK(so, 1);
3226 			return;
3227 		}
3228 #endif
3229 		socantsendmore(stcb->sctp_socket);
3230 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3231 		SCTP_SOCKET_UNLOCK(so, 1);
3232 #endif
3233 	}
3234 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3235 		/* event not enabled */
3236 		return;
3237 	}
3238 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3239 	if (m_notify == NULL)
3240 		/* no space left */
3241 		return;
3242 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3243 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3244 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3245 	sse->sse_flags = 0;
3246 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3247 	sse->sse_assoc_id = sctp_get_associd(stcb);
3248 
3249 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3250 	SCTP_BUF_NEXT(m_notify) = NULL;
3251 
3252 	/* append to socket */
3253 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3254 	    0, 0, stcb->asoc.context, 0, 0, 0,
3255 	    m_notify);
3256 	if (control == NULL) {
3257 		/* no memory */
3258 		sctp_m_freem(m_notify);
3259 		return;
3260 	}
3261 	control->spec_flags = M_NOTIFICATION;
3262 	control->length = SCTP_BUF_LEN(m_notify);
3263 	/* not that we need this */
3264 	control->tail_mbuf = m_notify;
3265 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3266 	    control,
3267 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3268 }
3269 
3270 static void
3271 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3272     int so_locked
3273 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3274     SCTP_UNUSED
3275 #endif
3276 )
3277 {
3278 	struct mbuf *m_notify;
3279 	struct sctp_sender_dry_event *event;
3280 	struct sctp_queued_to_read *control;
3281 
3282 	if ((stcb == NULL) ||
3283 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3284 		/* event not enabled */
3285 		return;
3286 	}
3287 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3288 	if (m_notify == NULL) {
3289 		/* no space left */
3290 		return;
3291 	}
3292 	SCTP_BUF_LEN(m_notify) = 0;
3293 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3294 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3295 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3296 	event->sender_dry_flags = 0;
3297 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3298 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3299 
3300 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3301 	SCTP_BUF_NEXT(m_notify) = NULL;
3302 
3303 	/* append to socket */
3304 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3305 	    0, 0, stcb->asoc.context, 0, 0, 0,
3306 	    m_notify);
3307 	if (control == NULL) {
3308 		/* no memory */
3309 		sctp_m_freem(m_notify);
3310 		return;
3311 	}
3312 	control->length = SCTP_BUF_LEN(m_notify);
3313 	control->spec_flags = M_NOTIFICATION;
3314 	/* not that we need this */
3315 	control->tail_mbuf = m_notify;
3316 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3317 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3318 }
3319 
3320 
3321 void
3322 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3323 {
3324 	struct mbuf *m_notify;
3325 	struct sctp_queued_to_read *control;
3326 	struct sctp_stream_change_event *stradd;
3327 
3328 	if ((stcb == NULL) ||
3329 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3330 		/* event not enabled */
3331 		return;
3332 	}
3333 	if ((stcb->asoc.peer_req_out) && flag) {
3334 		/* Peer made the request, don't tell the local user */
3335 		stcb->asoc.peer_req_out = 0;
3336 		return;
3337 	}
3338 	stcb->asoc.peer_req_out = 0;
3339 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3340 	if (m_notify == NULL)
3341 		/* no space left */
3342 		return;
3343 	SCTP_BUF_LEN(m_notify) = 0;
3344 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3345 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3346 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3347 	stradd->strchange_flags = flag;
3348 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3349 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3350 	stradd->strchange_instrms = numberin;
3351 	stradd->strchange_outstrms = numberout;
3352 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3353 	SCTP_BUF_NEXT(m_notify) = NULL;
3354 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3355 		/* no space */
3356 		sctp_m_freem(m_notify);
3357 		return;
3358 	}
3359 	/* append to socket */
3360 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3361 	    0, 0, stcb->asoc.context, 0, 0, 0,
3362 	    m_notify);
3363 	if (control == NULL) {
3364 		/* no memory */
3365 		sctp_m_freem(m_notify);
3366 		return;
3367 	}
3368 	control->spec_flags = M_NOTIFICATION;
3369 	control->length = SCTP_BUF_LEN(m_notify);
3370 	/* not that we need this */
3371 	control->tail_mbuf = m_notify;
3372 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3373 	    control,
3374 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3375 }
3376 
3377 void
3378 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3379 {
3380 	struct mbuf *m_notify;
3381 	struct sctp_queued_to_read *control;
3382 	struct sctp_assoc_reset_event *strasoc;
3383 
3384 	if ((stcb == NULL) ||
3385 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3386 		/* event not enabled */
3387 		return;
3388 	}
3389 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3390 	if (m_notify == NULL)
3391 		/* no space left */
3392 		return;
3393 	SCTP_BUF_LEN(m_notify) = 0;
3394 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3395 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3396 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3397 	strasoc->assocreset_flags = flag;
3398 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3399 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3400 	strasoc->assocreset_local_tsn = sending_tsn;
3401 	strasoc->assocreset_remote_tsn = recv_tsn;
3402 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3403 	SCTP_BUF_NEXT(m_notify) = NULL;
3404 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3405 		/* no space */
3406 		sctp_m_freem(m_notify);
3407 		return;
3408 	}
3409 	/* append to socket */
3410 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3411 	    0, 0, stcb->asoc.context, 0, 0, 0,
3412 	    m_notify);
3413 	if (control == NULL) {
3414 		/* no memory */
3415 		sctp_m_freem(m_notify);
3416 		return;
3417 	}
3418 	control->spec_flags = M_NOTIFICATION;
3419 	control->length = SCTP_BUF_LEN(m_notify);
3420 	/* not that we need this */
3421 	control->tail_mbuf = m_notify;
3422 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3423 	    control,
3424 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3425 }
3426 
3427 
3428 
3429 static void
3430 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3431     int number_entries, uint16_t * list, int flag)
3432 {
3433 	struct mbuf *m_notify;
3434 	struct sctp_queued_to_read *control;
3435 	struct sctp_stream_reset_event *strreset;
3436 	int len;
3437 
3438 	if ((stcb == NULL) ||
3439 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3440 		/* event not enabled */
3441 		return;
3442 	}
3443 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3444 	if (m_notify == NULL)
3445 		/* no space left */
3446 		return;
3447 	SCTP_BUF_LEN(m_notify) = 0;
3448 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3449 	if (len > M_TRAILINGSPACE(m_notify)) {
3450 		/* never enough room */
3451 		sctp_m_freem(m_notify);
3452 		return;
3453 	}
3454 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3455 	memset(strreset, 0, len);
3456 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3457 	strreset->strreset_flags = flag;
3458 	strreset->strreset_length = len;
3459 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3460 	if (number_entries) {
3461 		int i;
3462 
3463 		for (i = 0; i < number_entries; i++) {
3464 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3465 		}
3466 	}
3467 	SCTP_BUF_LEN(m_notify) = len;
3468 	SCTP_BUF_NEXT(m_notify) = NULL;
3469 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3470 		/* no space */
3471 		sctp_m_freem(m_notify);
3472 		return;
3473 	}
3474 	/* append to socket */
3475 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3476 	    0, 0, stcb->asoc.context, 0, 0, 0,
3477 	    m_notify);
3478 	if (control == NULL) {
3479 		/* no memory */
3480 		sctp_m_freem(m_notify);
3481 		return;
3482 	}
3483 	control->spec_flags = M_NOTIFICATION;
3484 	control->length = SCTP_BUF_LEN(m_notify);
3485 	/* not that we need this */
3486 	control->tail_mbuf = m_notify;
3487 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3488 	    control,
3489 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3490 }
3491 
3492 
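/*
 * Queue an SCTP_REMOTE_ERROR notification for an ERROR chunk received from
 * the peer.  The chunk itself is copied into the notification when a large
 * enough mbuf can be allocated.
 */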
3493 static void
3494 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3495 {
3496 	struct mbuf *m_notify;
3497 	struct sctp_remote_error *sre;
3498 	struct sctp_queued_to_read *control;
3499 	size_t notif_len, chunk_len;
3500 
3501 	if ((stcb == NULL) ||
3502 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3503 		return;
3504 	}
3505 	if (chunk != NULL) {
3506 		chunk_len = ntohs(chunk->ch.chunk_length);
3507 	} else {
3508 		chunk_len = 0;
3509 	}
3510 	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
3511 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3512 	if (m_notify == NULL) {
3513 		/* Retry with smaller value. */
3514 		notif_len = sizeof(struct sctp_remote_error);
3515 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3516 		if (m_notify == NULL) {
3517 			return;
3518 		}
3519 	}
3520 	SCTP_BUF_NEXT(m_notify) = NULL;
3521 	sre = mtod(m_notify, struct sctp_remote_error *);
3522 	memset(sre, 0, notif_len);
3523 	sre->sre_type = SCTP_REMOTE_ERROR;
3524 	sre->sre_flags = 0;
3525 	sre->sre_length = sizeof(struct sctp_remote_error);
3526 	sre->sre_error = error;
3527 	sre->sre_assoc_id = sctp_get_associd(stcb);
3528 	if (notif_len > sizeof(struct sctp_remote_error)) {
3529 		memcpy(sre->sre_data, chunk, chunk_len);
3530 		sre->sre_length += chunk_len;
3531 	}
3532 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3533 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3534 	    0, 0, stcb->asoc.context, 0, 0, 0,
3535 	    m_notify);
3536 	if (control != NULL) {
3537 		control->length = SCTP_BUF_LEN(m_notify);
3538 		/* not that we need this */
3539 		control->tail_mbuf = m_notify;
3540 		control->spec_flags = M_NOTIFICATION;
3541 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3542 		    control,
3543 		    &stcb->sctp_socket->so_rcv, 1,
3544 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3545 	} else {
3546 		sctp_m_freem(m_notify);
3547 	}
3548 }
3549 
3550 
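/*
 * Central dispatcher for ULP notifications: map the internal notification
 * code to the matching event constructor.  Notifications are suppressed when
 * the socket is gone or can't receive any more, and interface events are not
 * reported while the association is still in a front (COOKIE) state.
 */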
3551 void
3552 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3553     uint32_t error, void *data, int so_locked
3554 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3555     SCTP_UNUSED
3556 #endif
3557 )
3558 {
3559 	if ((stcb == NULL) ||
3560 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3561 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3562 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3563 		/* If the socket is gone we are out of here */
3564 		return;
3565 	}
3566 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3567 		return;
3568 	}
3569 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3570 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3571 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3572 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3573 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3574 			/* Don't report these in front states */
3575 			return;
3576 		}
3577 	}
3578 	switch (notification) {
3579 	case SCTP_NOTIFY_ASSOC_UP:
3580 		if (stcb->asoc.assoc_up_sent == 0) {
3581 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3582 			stcb->asoc.assoc_up_sent = 1;
3583 		}
3584 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3585 			sctp_notify_adaptation_layer(stcb);
3586 		}
3587 		if (stcb->asoc.auth_supported == 0) {
3588 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3589 			    NULL, so_locked);
3590 		}
3591 		break;
3592 	case SCTP_NOTIFY_ASSOC_DOWN:
3593 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3594 		break;
3595 	case SCTP_NOTIFY_INTERFACE_DOWN:
3596 		{
3597 			struct sctp_nets *net;
3598 
3599 			net = (struct sctp_nets *)data;
3600 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3601 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3602 			break;
3603 		}
3604 	case SCTP_NOTIFY_INTERFACE_UP:
3605 		{
3606 			struct sctp_nets *net;
3607 
3608 			net = (struct sctp_nets *)data;
3609 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3610 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3611 			break;
3612 		}
3613 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3614 		{
3615 			struct sctp_nets *net;
3616 
3617 			net = (struct sctp_nets *)data;
3618 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3619 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3620 			break;
3621 		}
3622 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3623 		sctp_notify_send_failed2(stcb, error,
3624 		    (struct sctp_stream_queue_pending *)data, so_locked);
3625 		break;
3626 	case SCTP_NOTIFY_SENT_DG_FAIL:
3627 		sctp_notify_send_failed(stcb, 1, error,
3628 		    (struct sctp_tmit_chunk *)data, so_locked);
3629 		break;
3630 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3631 		sctp_notify_send_failed(stcb, 0, error,
3632 		    (struct sctp_tmit_chunk *)data, so_locked);
3633 		break;
3634 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3635 		{
3636 			uint32_t val;
3637 
3638 			val = *((uint32_t *) data);
3639 
3640 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3641 			break;
3642 		}
3643 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3644 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3645 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3646 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3647 		} else {
3648 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3649 		}
3650 		break;
3651 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3652 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3653 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3654 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3655 		} else {
3656 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3657 		}
3658 		break;
3659 	case SCTP_NOTIFY_ASSOC_RESTART:
3660 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3661 		if (stcb->asoc.auth_supported == 0) {
3662 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3663 			    NULL, so_locked);
3664 		}
3665 		break;
3666 	case SCTP_NOTIFY_STR_RESET_SEND:
3667 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3668 		break;
3669 	case SCTP_NOTIFY_STR_RESET_RECV:
3670 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3671 		break;
3672 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3673 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3674 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3675 		break;
3676 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3677 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3678 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3679 		break;
3680 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3681 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3682 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3683 		break;
3684 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3685 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3686 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3687 		break;
3688 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3689 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3690 		    error, so_locked);
3691 		break;
3692 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3693 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3694 		    error, so_locked);
3695 		break;
3696 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3697 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3698 		    error, so_locked);
3699 		break;
3700 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3701 		sctp_notify_shutdown_event(stcb);
3702 		break;
3703 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3704 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3705 		    (uint16_t) (uintptr_t) data,
3706 		    so_locked);
3707 		break;
3708 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3709 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3710 		    (uint16_t) (uintptr_t) data,
3711 		    so_locked);
3712 		break;
3713 	case SCTP_NOTIFY_NO_PEER_AUTH:
3714 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3715 		    (uint16_t) (uintptr_t) data,
3716 		    so_locked);
3717 		break;
3718 	case SCTP_NOTIFY_SENDER_DRY:
3719 		sctp_notify_sender_dry_event(stcb, so_locked);
3720 		break;
3721 	case SCTP_NOTIFY_REMOTE_ERROR:
3722 		sctp_notify_remote_error(stcb, error, data);
3723 		break;
3724 	default:
3725 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3726 		    __FUNCTION__, notification, notification);
3727 		break;
3728 	}			/* end switch */
3729 }
3730 
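/*
 * Report every chunk still sitting on the sent queue, the send queue, and the
 * per-stream output queues as failed, notifying the ULP and freeing the
 * queued data.  Used when the association is lost.
 */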
3731 void
3732 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3733 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3734     SCTP_UNUSED
3735 #endif
3736 )
3737 {
3738 	struct sctp_association *asoc;
3739 	struct sctp_stream_out *outs;
3740 	struct sctp_tmit_chunk *chk, *nchk;
3741 	struct sctp_stream_queue_pending *sp, *nsp;
3742 	int i;
3743 
3744 	if (stcb == NULL) {
3745 		return;
3746 	}
3747 	asoc = &stcb->asoc;
3748 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3749 		/* already being freed */
3750 		return;
3751 	}
3752 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3753 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3754 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3755 		return;
3756 	}
3757 	/* now go through all the gunk, freeing chunks */
3758 	if (holds_lock == 0) {
3759 		SCTP_TCB_SEND_LOCK(stcb);
3760 	}
3761 	/* sent queue SHOULD be empty */
3762 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3763 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3764 		asoc->sent_queue_cnt--;
3765 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3766 			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3767 				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3768 #ifdef INVARIANTS
3769 			} else {
3770 				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3771 #endif
3772 			}
3773 		}
3774 		if (chk->data != NULL) {
3775 			sctp_free_bufspace(stcb, asoc, chk, 1);
3776 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3777 			    error, chk, so_locked);
3778 			if (chk->data) {
3779 				sctp_m_freem(chk->data);
3780 				chk->data = NULL;
3781 			}
3782 		}
3783 		sctp_free_a_chunk(stcb, chk, so_locked);
3784 		/* sa_ignore FREED_MEMORY */
3785 	}
3786 	/* pending send queue SHOULD be empty */
3787 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3788 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3789 		asoc->send_queue_cnt--;
3790 		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3791 			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3792 #ifdef INVARIANTS
3793 		} else {
3794 			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3795 #endif
3796 		}
3797 		if (chk->data != NULL) {
3798 			sctp_free_bufspace(stcb, asoc, chk, 1);
3799 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3800 			    error, chk, so_locked);
3801 			if (chk->data) {
3802 				sctp_m_freem(chk->data);
3803 				chk->data = NULL;
3804 			}
3805 		}
3806 		sctp_free_a_chunk(stcb, chk, so_locked);
3807 		/* sa_ignore FREED_MEMORY */
3808 	}
3809 	for (i = 0; i < asoc->streamoutcnt; i++) {
3810 		/* For each stream */
3811 		outs = &asoc->strmout[i];
3812 		/* clean up any sends there */
3813 		asoc->locked_on_sending = NULL;
3814 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3815 			asoc->stream_queue_cnt--;
3816 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3817 			sctp_free_spbufspace(stcb, asoc, sp);
3818 			if (sp->data) {
3819 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3820 				    error, (void *)sp, so_locked);
3821 				if (sp->data) {
3822 					sctp_m_freem(sp->data);
3823 					sp->data = NULL;
3824 					sp->tail_mbuf = NULL;
3825 					sp->length = 0;
3826 				}
3827 			}
3828 			if (sp->net) {
3829 				sctp_free_remote_addr(sp->net);
3830 				sp->net = NULL;
3831 			}
3832 			/* Free the chunk */
3833 			sctp_free_a_strmoq(stcb, sp, so_locked);
3834 			/* sa_ignore FREED_MEMORY */
3835 		}
3836 	}
3837 
3838 	if (holds_lock == 0) {
3839 		SCTP_TCB_SEND_UNLOCK(stcb);
3840 	}
3841 }
3842 
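/*
 * Notify the ULP that the association was aborted, either by the peer
 * (from_peer != 0) or locally, after reporting all outstanding data as
 * failed.
 */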
3843 void
3844 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3845     struct sctp_abort_chunk *abort, int so_locked
3846 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3847     SCTP_UNUSED
3848 #endif
3849 )
3850 {
3851 	if (stcb == NULL) {
3852 		return;
3853 	}
3854 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3855 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3856 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3857 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3858 	}
3859 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3860 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3861 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3862 		return;
3863 	}
3864 	/* Tell them we lost the asoc */
3865 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3866 	if (from_peer) {
3867 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3868 	} else {
3869 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3870 	}
3871 }
3872 
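/*
 * Abort an association in response to an incoming packet: send an ABORT back
 * to the packet's source and, if we have a TCB, notify the ULP and free the
 * association.
 */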
3873 void
3874 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3875     struct mbuf *m, int iphlen,
3876     struct sockaddr *src, struct sockaddr *dst,
3877     struct sctphdr *sh, struct mbuf *op_err,
3878     uint8_t mflowtype, uint32_t mflowid,
3879     uint32_t vrf_id, uint16_t port)
3880 {
3881 	uint32_t vtag;
3882 
3883 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3884 	struct socket *so;
3885 
3886 #endif
3887 
3888 	vtag = 0;
3889 	if (stcb != NULL) {
3890 		/* We have a TCB to abort, send notification too */
3891 		vtag = stcb->asoc.peer_vtag;
3892 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
3893 		/* get the assoc vrf id and table id */
3894 		vrf_id = stcb->asoc.vrf_id;
3895 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3896 	}
3897 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
3898 	    mflowtype, mflowid, inp->fibnum,
3899 	    vrf_id, port);
3900 	if (stcb != NULL) {
3901 		/* Ok, now let's free it */
3902 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3903 		so = SCTP_INP_SO(inp);
3904 		atomic_add_int(&stcb->asoc.refcnt, 1);
3905 		SCTP_TCB_UNLOCK(stcb);
3906 		SCTP_SOCKET_LOCK(so, 1);
3907 		SCTP_TCB_LOCK(stcb);
3908 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3909 #endif
3910 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3911 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3912 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3913 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3914 		}
3915 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
3916 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
3917 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3918 		SCTP_SOCKET_UNLOCK(so, 1);
3919 #endif
3920 	}
3921 }
3922 
3923 #ifdef SCTP_ASOCLOG_OF_TSNS
3924 void
3925 sctp_print_out_track_log(struct sctp_tcb *stcb)
3926 {
3927 #ifdef NOSIY_PRINTS
3928 	int i;
3929 
3930 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
3931 	SCTP_PRINTF("IN bound TSN log-aaa\n");
3932 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
3933 		SCTP_PRINTF("None rcvd\n");
3934 		goto none_in;
3935 	}
3936 	if (stcb->asoc.tsn_in_wrapped) {
3937 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
3938 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3939 			    stcb->asoc.in_tsnlog[i].tsn,
3940 			    stcb->asoc.in_tsnlog[i].strm,
3941 			    stcb->asoc.in_tsnlog[i].seq,
3942 			    stcb->asoc.in_tsnlog[i].flgs,
3943 			    stcb->asoc.in_tsnlog[i].sz);
3944 		}
3945 	}
3946 	if (stcb->asoc.tsn_in_at) {
3947 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
3948 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3949 			    stcb->asoc.in_tsnlog[i].tsn,
3950 			    stcb->asoc.in_tsnlog[i].strm,
3951 			    stcb->asoc.in_tsnlog[i].seq,
3952 			    stcb->asoc.in_tsnlog[i].flgs,
3953 			    stcb->asoc.in_tsnlog[i].sz);
3954 		}
3955 	}
3956 none_in:
3957 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
3958 	if ((stcb->asoc.tsn_out_at == 0) &&
3959 	    (stcb->asoc.tsn_out_wrapped == 0)) {
3960 		SCTP_PRINTF("None sent\n");
3961 	}
3962 	if (stcb->asoc.tsn_out_wrapped) {
3963 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
3964 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3965 			    stcb->asoc.out_tsnlog[i].tsn,
3966 			    stcb->asoc.out_tsnlog[i].strm,
3967 			    stcb->asoc.out_tsnlog[i].seq,
3968 			    stcb->asoc.out_tsnlog[i].flgs,
3969 			    stcb->asoc.out_tsnlog[i].sz);
3970 		}
3971 	}
3972 	if (stcb->asoc.tsn_out_at) {
3973 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
3974 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3975 			    stcb->asoc.out_tsnlog[i].tsn,
3976 			    stcb->asoc.out_tsnlog[i].strm,
3977 			    stcb->asoc.out_tsnlog[i].seq,
3978 			    stcb->asoc.out_tsnlog[i].flgs,
3979 			    stcb->asoc.out_tsnlog[i].sz);
3980 		}
3981 	}
3982 #endif
3983 }
3984 
3985 #endif
3986 
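/*
 * Abort an existing association: notify the ULP (unless the socket is gone),
 * send an ABORT chunk to the peer, update the statistics and free the
 * association.
 */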
3987 void
3988 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3989     struct mbuf *op_err,
3990     int so_locked
3991 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3992     SCTP_UNUSED
3993 #endif
3994 )
3995 {
3996 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3997 	struct socket *so;
3998 
3999 #endif
4000 
4001 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4002 	so = SCTP_INP_SO(inp);
4003 #endif
4004 	if (stcb == NULL) {
4005 		/* Got to have a TCB */
4006 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4007 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4008 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4009 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4010 			}
4011 		}
4012 		return;
4013 	} else {
4014 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4015 	}
4016 	/* notify the ulp */
4017 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4018 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4019 	}
4020 	/* notify the peer */
4021 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4022 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4023 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4024 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4025 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4026 	}
4027 	/* now free the asoc */
4028 #ifdef SCTP_ASOCLOG_OF_TSNS
4029 	sctp_print_out_track_log(stcb);
4030 #endif
4031 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4032 	if (!so_locked) {
4033 		atomic_add_int(&stcb->asoc.refcnt, 1);
4034 		SCTP_TCB_UNLOCK(stcb);
4035 		SCTP_SOCKET_LOCK(so, 1);
4036 		SCTP_TCB_LOCK(stcb);
4037 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4038 	}
4039 #endif
4040 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4041 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4042 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4043 	if (!so_locked) {
4044 		SCTP_SOCKET_UNLOCK(so, 1);
4045 	}
4046 #endif
4047 }
4048 
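/*
 * Handle an "out of the blue" packet, i.e. one for which we have no
 * association.  Scan its chunks first: some chunk types must not be answered
 * at all, a SHUTDOWN-ACK is answered with a SHUTDOWN-COMPLETE, and anything
 * else gets an ABORT, subject to the sctp_blackhole sysctl.
 */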
4049 void
4050 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4051     struct sockaddr *src, struct sockaddr *dst,
4052     struct sctphdr *sh, struct sctp_inpcb *inp,
4053     struct mbuf *cause,
4054     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4055     uint32_t vrf_id, uint16_t port)
4056 {
4057 	struct sctp_chunkhdr *ch, chunk_buf;
4058 	unsigned int chk_length;
4059 	int contains_init_chunk;
4060 
4061 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4062 	/* Generate a TO address for future reference */
4063 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4064 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4065 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4066 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4067 		}
4068 	}
4069 	contains_init_chunk = 0;
4070 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4071 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4072 	while (ch != NULL) {
4073 		chk_length = ntohs(ch->chunk_length);
4074 		if (chk_length < sizeof(*ch)) {
4075 			/* break to abort land */
4076 			break;
4077 		}
4078 		switch (ch->chunk_type) {
4079 		case SCTP_INIT:
4080 			contains_init_chunk = 1;
4081 			break;
4082 		case SCTP_PACKET_DROPPED:
4083 			/* we don't respond to pkt-dropped */
4084 			return;
4085 		case SCTP_ABORT_ASSOCIATION:
4086 			/* we don't respond with an ABORT to an ABORT */
4087 			return;
4088 		case SCTP_SHUTDOWN_COMPLETE:
4089 			/*
4090 			 * we ignore it since we are not waiting for it and
4091 			 * peer is gone
4092 			 */
4093 			return;
4094 		case SCTP_SHUTDOWN_ACK:
4095 			sctp_send_shutdown_complete2(src, dst, sh,
4096 			    mflowtype, mflowid, fibnum,
4097 			    vrf_id, port);
4098 			return;
4099 		default:
4100 			break;
4101 		}
4102 		offset += SCTP_SIZE32(chk_length);
4103 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4104 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4105 	}
4106 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4107 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4108 	    (contains_init_chunk == 0))) {
4109 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4110 		    mflowtype, mflowid, fibnum,
4111 		    vrf_id, port);
4112 	}
4113 }
4114 
4115 /*
4116  * check the inbound datagram to make sure there is not an abort inside it,
4117  * if there is return 1, else return 0.
4118  */
4119 int
4120 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4121 {
4122 	struct sctp_chunkhdr *ch;
4123 	struct sctp_init_chunk *init_chk, chunk_buf;
4124 	int offset;
4125 	unsigned int chk_length;
4126 
4127 	offset = iphlen + sizeof(struct sctphdr);
4128 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4129 	    (uint8_t *) & chunk_buf);
4130 	while (ch != NULL) {
4131 		chk_length = ntohs(ch->chunk_length);
4132 		if (chk_length < sizeof(*ch)) {
4133 			/* packet is probably corrupt */
4134 			break;
4135 		}
4136 		/* we seem to be ok, is it an abort? */
4137 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4138 			/* yep, tell them */
4139 			return (1);
4140 		}
4141 		if (ch->chunk_type == SCTP_INITIATION) {
4142 			/* need to update the Vtag */
4143 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4144 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4145 			if (init_chk != NULL) {
4146 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4147 			}
4148 		}
4149 		/* Nope, move to the next chunk */
4150 		offset += SCTP_SIZE32(chk_length);
4151 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4152 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4153 	}
4154 	return (0);
4155 }
4156 
4157 /*
4158  * currently (2/02), ifa_addr embeds the scope_id but does not have sin6_scope_id
4159  * set (i.e. it's 0), so this function was created to compare link-local scopes
4160  */
4161 #ifdef INET6
4162 uint32_t
4163 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4164 {
4165 	struct sockaddr_in6 a, b;
4166 
4167 	/* save copies */
4168 	a = *addr1;
4169 	b = *addr2;
4170 
4171 	if (a.sin6_scope_id == 0)
4172 		if (sa6_recoverscope(&a)) {
4173 			/* can't get scope, so can't match */
4174 			return (0);
4175 		}
4176 	if (b.sin6_scope_id == 0)
4177 		if (sa6_recoverscope(&b)) {
4178 			/* can't get scope, so can't match */
4179 			return (0);
4180 		}
4181 	if (a.sin6_scope_id != b.sin6_scope_id)
4182 		return (0);
4183 
4184 	return (1);
4185 }
4186 
4187 /*
4188  * returns a sockaddr_in6 with embedded scope recovered and removed
4189  */
4190 struct sockaddr_in6 *
4191 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4192 {
4193 	/* check and strip embedded scope junk */
4194 	if (addr->sin6_family == AF_INET6) {
4195 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4196 			if (addr->sin6_scope_id == 0) {
4197 				*store = *addr;
4198 				if (!sa6_recoverscope(store)) {
4199 					/* use the recovered scope */
4200 					addr = store;
4201 				}
4202 			} else {
4203 				/* else, return the original "to" addr */
4204 				in6_clearscope(&addr->sin6_addr);
4205 			}
4206 		}
4207 	}
4208 	return (addr);
4209 }
4210 
4211 #endif
4212 
4213 /*
4214  * are the two addresses the same?  currently a "scopeless" check returns: 1
4215  * if same, 0 if not
4216  */
4217 int
4218 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4219 {
4220 
4221 	/* must be valid */
4222 	if (sa1 == NULL || sa2 == NULL)
4223 		return (0);
4224 
4225 	/* must be the same family */
4226 	if (sa1->sa_family != sa2->sa_family)
4227 		return (0);
4228 
4229 	switch (sa1->sa_family) {
4230 #ifdef INET6
4231 	case AF_INET6:
4232 		{
4233 			/* IPv6 addresses */
4234 			struct sockaddr_in6 *sin6_1, *sin6_2;
4235 
4236 			sin6_1 = (struct sockaddr_in6 *)sa1;
4237 			sin6_2 = (struct sockaddr_in6 *)sa2;
4238 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4239 			    sin6_2));
4240 		}
4241 #endif
4242 #ifdef INET
4243 	case AF_INET:
4244 		{
4245 			/* IPv4 addresses */
4246 			struct sockaddr_in *sin_1, *sin_2;
4247 
4248 			sin_1 = (struct sockaddr_in *)sa1;
4249 			sin_2 = (struct sockaddr_in *)sa2;
4250 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4251 		}
4252 #endif
4253 	default:
4254 		/* we don't do these... */
4255 		return (0);
4256 	}
4257 }
4258 
4259 void
4260 sctp_print_address(struct sockaddr *sa)
4261 {
4262 #ifdef INET6
4263 	char ip6buf[INET6_ADDRSTRLEN];
4264 
4265 #endif
4266 
4267 	switch (sa->sa_family) {
4268 #ifdef INET6
4269 	case AF_INET6:
4270 		{
4271 			struct sockaddr_in6 *sin6;
4272 
4273 			sin6 = (struct sockaddr_in6 *)sa;
4274 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4275 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4276 			    ntohs(sin6->sin6_port),
4277 			    sin6->sin6_scope_id);
4278 			break;
4279 		}
4280 #endif
4281 #ifdef INET
4282 	case AF_INET:
4283 		{
4284 			struct sockaddr_in *sin;
4285 			unsigned char *p;
4286 
4287 			sin = (struct sockaddr_in *)sa;
4288 			p = (unsigned char *)&sin->sin_addr;
4289 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4290 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4291 			break;
4292 		}
4293 #endif
4294 	default:
4295 		SCTP_PRINTF("?\n");
4296 		break;
4297 	}
4298 }
4299 
4300 void
4301 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4302     struct sctp_inpcb *new_inp,
4303     struct sctp_tcb *stcb,
4304     int waitflags)
4305 {
4306 	/*
4307 	 * go through our old INP and pull off any control structures that
4308 	 * belong to stcb and move them to the new inp.
4309 	 */
4310 	struct socket *old_so, *new_so;
4311 	struct sctp_queued_to_read *control, *nctl;
4312 	struct sctp_readhead tmp_queue;
4313 	struct mbuf *m;
4314 	int error = 0;
4315 
4316 	old_so = old_inp->sctp_socket;
4317 	new_so = new_inp->sctp_socket;
4318 	TAILQ_INIT(&tmp_queue);
4319 	error = sblock(&old_so->so_rcv, waitflags);
4320 	if (error) {
4321 		/*
4322 		 * Gak, can't get sblock, we have a problem. data will be
4323 		 * left stranded.. and we don't dare look at it since the
4324 		 * other thread may be reading something. Oh well, it's a
4325 		 * screwed up app that does a peeloff OR an accept while
4326 		 * reading from the main socket... actually it's only the
4327 		 * peeloff() case, since I think read will fail on a
4328 		 * listening socket..
4329 		 */
4330 		return;
4331 	}
4332 	/* lock the socket buffers */
4333 	SCTP_INP_READ_LOCK(old_inp);
4334 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4335 		/* Pull off all for our target stcb */
4336 		if (control->stcb == stcb) {
4337 			/* remove it we want it */
4338 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4339 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4340 			m = control->data;
4341 			while (m) {
4342 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4343 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4344 				}
4345 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4346 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4347 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4348 				}
4349 				m = SCTP_BUF_NEXT(m);
4350 			}
4351 		}
4352 	}
4353 	SCTP_INP_READ_UNLOCK(old_inp);
4354 	/* Remove the sb-lock on the old socket */
4355 
4356 	sbunlock(&old_so->so_rcv);
4357 	/* Now we move them over to the new socket buffer */
4358 	SCTP_INP_READ_LOCK(new_inp);
4359 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4360 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4361 		m = control->data;
4362 		while (m) {
4363 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4364 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4365 			}
4366 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4367 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4368 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4369 			}
4370 			m = SCTP_BUF_NEXT(m);
4371 		}
4372 	}
4373 	SCTP_INP_READ_UNLOCK(new_inp);
4374 }
4375 
4376 void
4377 sctp_add_to_readq(struct sctp_inpcb *inp,
4378     struct sctp_tcb *stcb,
4379     struct sctp_queued_to_read *control,
4380     struct sockbuf *sb,
4381     int end,
4382     int inp_read_lock_held,
4383     int so_locked
4384 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4385     SCTP_UNUSED
4386 #endif
4387 )
4388 {
4389 	/*
4390 	 * Here we must place the control on the end of the socket read
4391 	 * queue AND increment sb_cc so that select will work properly on
4392 	 * read.
4393 	 */
4394 	struct mbuf *m, *prev = NULL;
4395 
4396 	if (inp == NULL) {
4397 		/* Gak, TSNH!! */
4398 #ifdef INVARIANTS
4399 		panic("Gak, inp NULL on add_to_readq");
4400 #endif
4401 		return;
4402 	}
4403 	if (inp_read_lock_held == 0)
4404 		SCTP_INP_READ_LOCK(inp);
4405 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4406 		sctp_free_remote_addr(control->whoFrom);
4407 		if (control->data) {
4408 			sctp_m_freem(control->data);
4409 			control->data = NULL;
4410 		}
4411 		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
4412 		if (inp_read_lock_held == 0)
4413 			SCTP_INP_READ_UNLOCK(inp);
4414 		return;
4415 	}
4416 	if (!(control->spec_flags & M_NOTIFICATION)) {
4417 		atomic_add_int(&inp->total_recvs, 1);
4418 		if (!control->do_not_ref_stcb) {
4419 			atomic_add_int(&stcb->total_recvs, 1);
4420 		}
4421 	}
4422 	m = control->data;
4423 	control->held_length = 0;
4424 	control->length = 0;
4425 	while (m) {
4426 		if (SCTP_BUF_LEN(m) == 0) {
4427 			/* Skip mbufs with NO length */
4428 			if (prev == NULL) {
4429 				/* First one */
4430 				control->data = sctp_m_free(m);
4431 				m = control->data;
4432 			} else {
4433 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4434 				m = SCTP_BUF_NEXT(prev);
4435 			}
4436 			if (m == NULL) {
4437 				control->tail_mbuf = prev;
4438 			}
4439 			continue;
4440 		}
4441 		prev = m;
4442 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4443 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4444 		}
4445 		sctp_sballoc(stcb, sb, m);
4446 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4447 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4448 		}
4449 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4450 		m = SCTP_BUF_NEXT(m);
4451 	}
4452 	if (prev != NULL) {
4453 		control->tail_mbuf = prev;
4454 	} else {
4455 		/* Everything got collapsed out?? */
4456 		sctp_free_remote_addr(control->whoFrom);
4457 		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
4458 		if (inp_read_lock_held == 0)
4459 			SCTP_INP_READ_UNLOCK(inp);
4460 		return;
4461 	}
4462 	if (end) {
4463 		control->end_added = 1;
4464 	}
4465 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4466 	if (inp_read_lock_held == 0)
4467 		SCTP_INP_READ_UNLOCK(inp);
4468 	if (inp && inp->sctp_socket) {
4469 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4470 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4471 		} else {
4472 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4473 			struct socket *so;
4474 
4475 			so = SCTP_INP_SO(inp);
4476 			if (!so_locked) {
4477 				if (stcb) {
4478 					atomic_add_int(&stcb->asoc.refcnt, 1);
4479 					SCTP_TCB_UNLOCK(stcb);
4480 				}
4481 				SCTP_SOCKET_LOCK(so, 1);
4482 				if (stcb) {
4483 					SCTP_TCB_LOCK(stcb);
4484 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4485 				}
4486 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4487 					SCTP_SOCKET_UNLOCK(so, 1);
4488 					return;
4489 				}
4490 			}
4491 #endif
4492 			sctp_sorwakeup(inp, inp->sctp_socket);
4493 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4494 			if (!so_locked) {
4495 				SCTP_SOCKET_UNLOCK(so, 1);
4496 			}
4497 #endif
4498 		}
4499 	}
4500 }
4501 
4502 
4503 int
4504 sctp_append_to_readq(struct sctp_inpcb *inp,
4505     struct sctp_tcb *stcb,
4506     struct sctp_queued_to_read *control,
4507     struct mbuf *m,
4508     int end,
4509     int ctls_cumack,
4510     struct sockbuf *sb)
4511 {
4512 	/*
4513 	 * A partial delivery API event is underway. OR we are appending on
4514 	 * the reassembly queue.
4515 	 *
4516 	 * If PDAPI this means we need to add m to the end of the data.
4517 	 * Increase the length in the control AND increment the sb_cc.
4518 	 * Otherwise sb is NULL and all we need to do is put it at the end
4519 	 * of the mbuf chain.
4520 	 */
4521 	int len = 0;
4522 	struct mbuf *mm, *tail = NULL, *prev = NULL;
4523 
4524 	if (inp) {
4525 		SCTP_INP_READ_LOCK(inp);
4526 	}
4527 	if (control == NULL) {
4528 get_out:
4529 		if (inp) {
4530 			SCTP_INP_READ_UNLOCK(inp);
4531 		}
4532 		return (-1);
4533 	}
4534 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
4535 		SCTP_INP_READ_UNLOCK(inp);
4536 		return (0);
4537 	}
4538 	if (control->end_added) {
4539 		/* huh this one is complete? */
4540 		goto get_out;
4541 	}
4542 	mm = m;
4543 	if (mm == NULL) {
4544 		goto get_out;
4545 	}
4546 	while (mm) {
4547 		if (SCTP_BUF_LEN(mm) == 0) {
4548 			/* Skip mbufs with NO length */
4549 			if (prev == NULL) {
4550 				/* First one */
4551 				m = sctp_m_free(mm);
4552 				mm = m;
4553 			} else {
4554 				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
4555 				mm = SCTP_BUF_NEXT(prev);
4556 			}
4557 			continue;
4558 		}
4559 		prev = mm;
4560 		len += SCTP_BUF_LEN(mm);
4561 		if (sb) {
4562 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4563 				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
4564 			}
4565 			sctp_sballoc(stcb, sb, mm);
4566 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4567 				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4568 			}
4569 		}
4570 		mm = SCTP_BUF_NEXT(mm);
4571 	}
4572 	if (prev) {
4573 		tail = prev;
4574 	} else {
4575 		/* Really there should always be a prev */
4576 		if (m == NULL) {
4577 			/* Huh nothing left? */
4578 #ifdef INVARIANTS
4579 			panic("Nothing left to add?");
4580 #else
4581 			goto get_out;
4582 #endif
4583 		}
4584 		tail = m;
4585 	}
4586 	if (control->tail_mbuf) {
4587 		/* append */
4588 		SCTP_BUF_NEXT(control->tail_mbuf) = m;
4589 		control->tail_mbuf = tail;
4590 	} else {
4591 		/* nothing there */
4592 #ifdef INVARIANTS
4593 		if (control->data != NULL) {
4594 			panic("This should NOT happen");
4595 		}
4596 #endif
4597 		control->data = m;
4598 		control->tail_mbuf = tail;
4599 	}
4600 	atomic_add_int(&control->length, len);
4601 	if (end) {
4602 		/* message is complete */
4603 		if (stcb && (control == stcb->asoc.control_pdapi)) {
4604 			stcb->asoc.control_pdapi = NULL;
4605 		}
4606 		control->held_length = 0;
4607 		control->end_added = 1;
4608 	}
4609 	if (stcb == NULL) {
4610 		control->do_not_ref_stcb = 1;
4611 	}
4612 	/*
4613 	 * When we are appending in partial delivery, the cum-ack is used
4614 	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
4615 	 * is populated in the outbound sinfo structure from the true cumack
4616 	 * if the association exists...
4617 	 */
4618 	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
4619 	if (inp) {
4620 		SCTP_INP_READ_UNLOCK(inp);
4621 	}
4622 	if (inp && inp->sctp_socket) {
4623 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4624 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4625 		} else {
4626 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4627 			struct socket *so;
4628 
4629 			so = SCTP_INP_SO(inp);
4630 			if (stcb) {
4631 				atomic_add_int(&stcb->asoc.refcnt, 1);
4632 				SCTP_TCB_UNLOCK(stcb);
4633 			}
4634 			SCTP_SOCKET_LOCK(so, 1);
4635 			if (stcb) {
4636 				SCTP_TCB_LOCK(stcb);
4637 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4638 			}
4639 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4640 				SCTP_SOCKET_UNLOCK(so, 1);
4641 				return (0);
4642 			}
4643 #endif
4644 			sctp_sorwakeup(inp, inp->sctp_socket);
4645 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4646 			SCTP_SOCKET_UNLOCK(so, 1);
4647 #endif
4648 		}
4649 	}
4650 	return (0);
4651 }
4652 
4653 
4654 
4655 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4656  *************ALTERNATE ROUTING CODE
4657  */
4658 
4659 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4660  *************ALTERNATE ROUTING CODE
4661  */
4662 
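/*
 * Build an mbuf holding an SCTP error cause with the given cause code and the
 * supplied text as the cause-specific information.
 */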
4663 struct mbuf *
4664 sctp_generate_cause(uint16_t code, char *info)
4665 {
4666 	struct mbuf *m;
4667 	struct sctp_gen_error_cause *cause;
4668 	size_t info_len, len;
4669 
4670 	if ((code == 0) || (info == NULL)) {
4671 		return (NULL);
4672 	}
4673 	info_len = strlen(info);
4674 	len = sizeof(struct sctp_paramhdr) + info_len;
4675 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4676 	if (m != NULL) {
4677 		SCTP_BUF_LEN(m) = len;
4678 		cause = mtod(m, struct sctp_gen_error_cause *);
4679 		cause->code = htons(code);
4680 		cause->length = htons((uint16_t) len);
4681 		memcpy(cause->info, info, info_len);
4682 	}
4683 	return (m);
4684 }
4685 
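/*
 * Build an mbuf holding a "No User Data" error cause for the given TSN, which
 * is expected in network byte order.
 */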
4686 struct mbuf *
4687 sctp_generate_no_user_data_cause(uint32_t tsn)
4688 {
4689 	struct mbuf *m;
4690 	struct sctp_error_no_user_data *no_user_data_cause;
4691 	size_t len;
4692 
4693 	len = sizeof(struct sctp_error_no_user_data);
4694 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4695 	if (m != NULL) {
4696 		SCTP_BUF_LEN(m) = len;
4697 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4698 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4699 		no_user_data_cause->cause.length = htons((uint16_t) len);
4700 		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4701 	}
4702 	return (m);
4703 }
4704 
4705 #ifdef SCTP_MBCNT_LOGGING
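/*
 * Release the buffer space accounted to tp1, both in the association's output
 * queue size and in the socket send buffer, logging the mbuf counts when
 * MBCNT logging is enabled.
 */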
4706 void
4707 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4708     struct sctp_tmit_chunk *tp1, int chk_cnt)
4709 {
4710 	if (tp1->data == NULL) {
4711 		return;
4712 	}
4713 	asoc->chunks_on_out_queue -= chk_cnt;
4714 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4715 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4716 		    asoc->total_output_queue_size,
4717 		    tp1->book_size,
4718 		    0,
4719 		    tp1->mbcnt);
4720 	}
4721 	if (asoc->total_output_queue_size >= tp1->book_size) {
4722 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4723 	} else {
4724 		asoc->total_output_queue_size = 0;
4725 	}
4726 
4727 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4728 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4729 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4730 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4731 		} else {
4732 			stcb->sctp_socket->so_snd.sb_cc = 0;
4733 
4734 		}
4735 	}
4736 }
4737 
4738 #endif
4739 
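/*
 * Abandon a PR-SCTP message: mark every fragment of the message, whether it
 * sits on the sent queue, the send queue or still on the stream output queue,
 * to be skipped by a FORWARD-TSN, notify the ULP of the failure and return
 * the number of bytes released.
 */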
4740 int
4741 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4742     uint8_t sent, int so_locked
4743 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4744     SCTP_UNUSED
4745 #endif
4746 )
4747 {
4748 	struct sctp_stream_out *strq;
4749 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4750 	struct sctp_stream_queue_pending *sp;
4751 	uint16_t stream = 0, seq = 0;
4752 	uint8_t foundeom = 0;
4753 	int ret_sz = 0;
4754 	int notdone;
4755 	int do_wakeup_routine = 0;
4756 
4757 	stream = tp1->rec.data.stream_number;
4758 	seq = tp1->rec.data.stream_seq;
4759 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4760 		stcb->asoc.abandoned_sent[0]++;
4761 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4762 		stcb->asoc.strmout[stream].abandoned_sent[0]++;
4763 #if defined(SCTP_DETAILED_STR_STATS)
4764 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4765 #endif
4766 	} else {
4767 		stcb->asoc.abandoned_unsent[0]++;
4768 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4769 		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
4770 #if defined(SCTP_DETAILED_STR_STATS)
4771 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4772 #endif
4773 	}
4774 	do {
4775 		ret_sz += tp1->book_size;
4776 		if (tp1->data != NULL) {
4777 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4778 				sctp_flight_size_decrease(tp1);
4779 				sctp_total_flight_decrease(stcb, tp1);
4780 			}
4781 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4782 			stcb->asoc.peers_rwnd += tp1->send_size;
4783 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4784 			if (sent) {
4785 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4786 			} else {
4787 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4788 			}
4789 			if (tp1->data) {
4790 				sctp_m_freem(tp1->data);
4791 				tp1->data = NULL;
4792 			}
4793 			do_wakeup_routine = 1;
4794 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4795 				stcb->asoc.sent_queue_cnt_removeable--;
4796 			}
4797 		}
4798 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4799 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4800 		    SCTP_DATA_NOT_FRAG) {
4801 			/* not frag'ed, we are done */
4802 			notdone = 0;
4803 			foundeom = 1;
4804 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4805 			/* end of frag, we are done */
4806 			notdone = 0;
4807 			foundeom = 1;
4808 		} else {
4809 			/*
4810 			 * It's a begin or middle piece, we must mark all of
4811 			 * it
4812 			 */
4813 			notdone = 1;
4814 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4815 		}
4816 	} while (tp1 && notdone);
4817 	if (foundeom == 0) {
4818 		/*
4819 		 * The multi-part message was scattered across the send and
4820 		 * sent queue.
4821 		 */
4822 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4823 			if ((tp1->rec.data.stream_number != stream) ||
4824 			    (tp1->rec.data.stream_seq != seq)) {
4825 				break;
4826 			}
4827 			/*
4828 			 * save to chk in case we have some on stream out
4829 			 * queue. If so and we have an un-transmitted one we
4830 			 * don't have to fudge the TSN.
4831 			 */
4832 			chk = tp1;
4833 			ret_sz += tp1->book_size;
4834 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4835 			if (sent) {
4836 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4837 			} else {
4838 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4839 			}
4840 			if (tp1->data) {
4841 				sctp_m_freem(tp1->data);
4842 				tp1->data = NULL;
4843 			}
4844 			/* No flight involved here; book the size to 0 */
4845 			tp1->book_size = 0;
4846 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4847 				foundeom = 1;
4848 			}
4849 			do_wakeup_routine = 1;
4850 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4851 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4852 			/*
4853 			 * on to the sent queue so we can wait for it to be
4854 			 * passed by.
4855 			 */
4856 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4857 			    sctp_next);
4858 			stcb->asoc.send_queue_cnt--;
4859 			stcb->asoc.sent_queue_cnt++;
4860 		}
4861 	}
4862 	if (foundeom == 0) {
4863 		/*
4864 		 * Still no eom found. That means there is stuff left on the
4865 		 * stream out queue.. yuck.
4866 		 */
4867 		SCTP_TCB_SEND_LOCK(stcb);
4868 		strq = &stcb->asoc.strmout[stream];
4869 		sp = TAILQ_FIRST(&strq->outqueue);
4870 		if (sp != NULL) {
4871 			sp->discard_rest = 1;
4872 			/*
4873 			 * We may need to put a chunk on the queue that
4874 			 * holds the TSN that would have been sent with the
4875 			 * LAST bit.
4876 			 */
4877 			if (chk == NULL) {
4878 				/* Yep, we have to */
4879 				sctp_alloc_a_chunk(stcb, chk);
4880 				if (chk == NULL) {
4881 					/*
4882 					 * we are hosed. All we can do is
4883 					 * nothing.. which will cause an
4884 					 * abort if the peer is paying
4885 					 * attention.
4886 					 */
4887 					goto oh_well;
4888 				}
4889 				memset(chk, 0, sizeof(*chk));
4890 				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4891 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4892 				chk->asoc = &stcb->asoc;
4893 				chk->rec.data.stream_seq = strq->next_sequence_send;
4894 				chk->rec.data.stream_number = sp->stream;
4895 				chk->rec.data.payloadtype = sp->ppid;
4896 				chk->rec.data.context = sp->context;
4897 				chk->flags = sp->act_flags;
4898 				chk->whoTo = NULL;
4899 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4900 				strq->chunks_on_queues++;
4901 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4902 				stcb->asoc.sent_queue_cnt++;
4903 				stcb->asoc.pr_sctp_cnt++;
4904 			} else {
4905 				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4906 			}
4907 			strq->next_sequence_send++;
4908 	oh_well:
4909 			if (sp->data) {
4910 				/*
4911 				 * Pull any data to free up the SB and allow
4912 				 * sender to "add more" while we will throw
4913 				 * away :-)
4914 				 */
4915 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4916 				ret_sz += sp->length;
4917 				do_wakeup_routine = 1;
4918 				sp->some_taken = 1;
4919 				sctp_m_freem(sp->data);
4920 				sp->data = NULL;
4921 				sp->tail_mbuf = NULL;
4922 				sp->length = 0;
4923 			}
4924 		}
4925 		SCTP_TCB_SEND_UNLOCK(stcb);
4926 	}
4927 	if (do_wakeup_routine) {
4928 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4929 		struct socket *so;
4930 
4931 		so = SCTP_INP_SO(stcb->sctp_ep);
4932 		if (!so_locked) {
4933 			atomic_add_int(&stcb->asoc.refcnt, 1);
4934 			SCTP_TCB_UNLOCK(stcb);
4935 			SCTP_SOCKET_LOCK(so, 1);
4936 			SCTP_TCB_LOCK(stcb);
4937 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4938 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4939 				/* assoc was freed while we were unlocked */
4940 				SCTP_SOCKET_UNLOCK(so, 1);
4941 				return (ret_sz);
4942 			}
4943 		}
4944 #endif
4945 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4946 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4947 		if (!so_locked) {
4948 			SCTP_SOCKET_UNLOCK(so, 1);
4949 		}
4950 #endif
4951 	}
4952 	return (ret_sz);
4953 }
4954 
4955 /*
4956  * checks to see if the given address, sa, is one that is currently known by
4957  * the kernel.  Note: it can't distinguish the same address on multiple
4958  * interfaces and doesn't handle multiple addresses with different zone/scope
4959  * ids.  Note: ifa_ifwithaddr() compares the entire sockaddr struct.
4960  */
4961 struct sctp_ifa *
4962 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4963     int holds_lock)
4964 {
4965 	struct sctp_laddr *laddr;
4966 
4967 	if (holds_lock == 0) {
4968 		SCTP_INP_RLOCK(inp);
4969 	}
4970 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4971 		if (laddr->ifa == NULL)
4972 			continue;
4973 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4974 			continue;
4975 #ifdef INET
4976 		if (addr->sa_family == AF_INET) {
4977 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4978 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4979 				/* found him. */
4980 				if (holds_lock == 0) {
4981 					SCTP_INP_RUNLOCK(inp);
4982 				}
4983 				return (laddr->ifa);
4984 				break;
4985 			}
4986 		}
4987 #endif
4988 #ifdef INET6
4989 		if (addr->sa_family == AF_INET6) {
4990 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4991 			    &laddr->ifa->address.sin6)) {
4992 				/* found him. */
4993 				if (holds_lock == 0) {
4994 					SCTP_INP_RUNLOCK(inp);
4995 				}
4996 				return (laddr->ifa);
4997 				break;
4998 			}
4999 		}
5000 #endif
5001 	}
5002 	if (holds_lock == 0) {
5003 		SCTP_INP_RUNLOCK(inp);
5004 	}
5005 	return (NULL);
5006 }
5007 
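/*
 * Compute the hash value used to select the address hash bucket for an IPv4
 * or IPv6 address.
 */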
5008 uint32_t
5009 sctp_get_ifa_hash_val(struct sockaddr *addr)
5010 {
5011 	switch (addr->sa_family) {
5012 #ifdef INET
5013 	case AF_INET:
5014 		{
5015 			struct sockaddr_in *sin;
5016 
5017 			sin = (struct sockaddr_in *)addr;
5018 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5019 		}
5020 #endif
5021 #ifdef INET6
5022 	case AF_INET6:
5023 		{
5024 			struct sockaddr_in6 *sin6;
5025 			uint32_t hash_of_addr;
5026 
5027 			sin6 = (struct sockaddr_in6 *)addr;
5028 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5029 			    sin6->sin6_addr.s6_addr32[1] +
5030 			    sin6->sin6_addr.s6_addr32[2] +
5031 			    sin6->sin6_addr.s6_addr32[3]);
5032 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5033 			return (hash_of_addr);
5034 		}
5035 #endif
5036 	default:
5037 		break;
5038 	}
5039 	return (0);
5040 }
5041 
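/*
 * Look up an address in the given VRF's address hash table and return its
 * sctp_ifa, or NULL if the address is not known.
 */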
5042 struct sctp_ifa *
5043 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5044 {
5045 	struct sctp_ifa *sctp_ifap;
5046 	struct sctp_vrf *vrf;
5047 	struct sctp_ifalist *hash_head;
5048 	uint32_t hash_of_addr;
5049 
5050 	if (holds_lock == 0)
5051 		SCTP_IPI_ADDR_RLOCK();
5052 
5053 	vrf = sctp_find_vrf(vrf_id);
5054 	if (vrf == NULL) {
5055 		if (holds_lock == 0)
5056 			SCTP_IPI_ADDR_RUNLOCK();
5057 		return (NULL);
5058 	}
5059 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5060 
5061 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5062 	if (hash_head == NULL) {
5063 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5064 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5065 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5066 		sctp_print_address(addr);
5067 		SCTP_PRINTF("No such bucket for address\n");
5068 		if (holds_lock == 0)
5069 			SCTP_IPI_ADDR_RUNLOCK();
5070 
5071 		return (NULL);
5072 	}
5073 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5074 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5075 			continue;
5076 #ifdef INET
5077 		if (addr->sa_family == AF_INET) {
5078 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5079 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5080 				/* found him. */
5081 				if (holds_lock == 0)
5082 					SCTP_IPI_ADDR_RUNLOCK();
5083 				return (sctp_ifap);
5084 				break;
5085 			}
5086 		}
5087 #endif
5088 #ifdef INET6
5089 		if (addr->sa_family == AF_INET6) {
5090 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5091 			    &sctp_ifap->address.sin6)) {
5092 				/* found him. */
5093 				if (holds_lock == 0)
5094 					SCTP_IPI_ADDR_RUNLOCK();
5095 				return (sctp_ifap);
5096 				break;
5097 			}
5098 		}
5099 #endif
5100 	}
5101 	if (holds_lock == 0)
5102 		SCTP_IPI_ADDR_RUNLOCK();
5103 	return (NULL);
5104 }
5105 
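/*
 * Called after the user has pulled data from the receive buffer: if enough
 * window has opened up (at least rwnd_req bytes), send a window update SACK;
 * otherwise just record how much has been freed since the last report.
 */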
5106 static void
5107 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
5108     uint32_t rwnd_req)
5109 {
5110 	/* User pulled some data, do we need a rwnd update? */
5111 	int r_unlocked = 0;
5112 	uint32_t dif, rwnd;
5113 	struct socket *so = NULL;
5114 
5115 	if (stcb == NULL)
5116 		return;
5117 
5118 	atomic_add_int(&stcb->asoc.refcnt, 1);
5119 
5120 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5121 	    SCTP_STATE_SHUTDOWN_RECEIVED |
5122 	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5123 		/* Pre-check If we are freeing no update */
5124 		/* Pre-check: if we are freeing, no update is needed */
5125 	}
5126 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5127 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5128 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5129 		goto out;
5130 	}
5131 	so = stcb->sctp_socket;
5132 	if (so == NULL) {
5133 		goto out;
5134 	}
5135 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5136 	/* Have you freed enough to be worth a look? */
5137 	*freed_so_far = 0;
5138 	/* Yep, it's worth a look and the lock overhead */
5139 
5140 	/* Figure out what the rwnd would be */
5141 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5142 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5143 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5144 	} else {
5145 		dif = 0;
5146 	}
5147 	if (dif >= rwnd_req) {
5148 		if (hold_rlock) {
5149 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5150 			r_unlocked = 1;
5151 		}
5152 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5153 			/*
5154 			 * One last check before we possibly let the guy in.
5155 			 * There is a race where the guy has not reached the
5156 			 * gate; in that case just bail out.
5157 			 */
5158 			goto out;
5159 		}
5160 		SCTP_TCB_LOCK(stcb);
5161 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5162 			/* No reports here */
5163 			SCTP_TCB_UNLOCK(stcb);
5164 			goto out;
5165 		}
5166 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5167 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5168 
5169 		sctp_chunk_output(stcb->sctp_ep, stcb,
5170 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5171 		/* make sure no timer is running */
5172 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5173 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5174 		SCTP_TCB_UNLOCK(stcb);
5175 	} else {
5176 		/* Update how much we have pending */
5177 		stcb->freed_by_sorcv_sincelast = dif;
5178 	}
5179 out:
5180 	if (so && r_unlocked && hold_rlock) {
5181 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5182 	}
5183 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5184 no_lock:
5185 	atomic_add_int(&stcb->asoc.refcnt, -1);
5186 	return;
5187 }
5188 
5189 int
5190 sctp_sorecvmsg(struct socket *so,
5191     struct uio *uio,
5192     struct mbuf **mp,
5193     struct sockaddr *from,
5194     int fromlen,
5195     int *msg_flags,
5196     struct sctp_sndrcvinfo *sinfo,
5197     int filling_sinfo)
5198 {
5199 	/*
5200 	 * MSG flags we will look at: MSG_DONTWAIT - non-blocking IO.
5201 	 * MSG_PEEK - Look don't touch :-D (only valid with no out mbuf copy,
5202 	 * i.e. mp == NULL, thus uio is the copy method to userland). MSG_WAITALL - ??
5203 	 * On the way out we may send out any combination of:
5204 	 * MSG_NOTIFICATION MSG_EOR
5205 	 *
5206 	 */
5207 	struct sctp_inpcb *inp = NULL;
5208 	int my_len = 0;
5209 	int cp_len = 0, error = 0;
5210 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5211 	struct mbuf *m = NULL;
5212 	struct sctp_tcb *stcb = NULL;
5213 	int wakeup_read_socket = 0;
5214 	int freecnt_applied = 0;
5215 	int out_flags = 0, in_flags = 0;
5216 	int block_allowed = 1;
5217 	uint32_t freed_so_far = 0;
5218 	uint32_t copied_so_far = 0;
5219 	int in_eeor_mode = 0;
5220 	int no_rcv_needed = 0;
5221 	uint32_t rwnd_req = 0;
5222 	int hold_sblock = 0;
5223 	int hold_rlock = 0;
5224 	int slen = 0;
5225 	uint32_t held_length = 0;
5226 	int sockbuf_lock = 0;
5227 
5228 	if (uio == NULL) {
5229 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5230 		return (EINVAL);
5231 	}
5232 	if (msg_flags) {
5233 		in_flags = *msg_flags;
5234 		if (in_flags & MSG_PEEK)
5235 			SCTP_STAT_INCR(sctps_read_peeks);
5236 	} else {
5237 		in_flags = 0;
5238 	}
5239 	slen = uio->uio_resid;
5240 
5241 	/* Pull in and set up our int flags */
5242 	if (in_flags & MSG_OOB) {
5243 		/* Out-of-band data is NOT supported */
5244 		return (EOPNOTSUPP);
5245 	}
5246 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5247 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5248 		return (EINVAL);
5249 	}
5250 	if ((in_flags & (MSG_DONTWAIT
5251 	    | MSG_NBIO
5252 	    )) ||
5253 	    SCTP_SO_IS_NBIO(so)) {
5254 		block_allowed = 0;
5255 	}
5256 	/* setup the endpoint */
5257 	inp = (struct sctp_inpcb *)so->so_pcb;
5258 	if (inp == NULL) {
5259 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5260 		return (EFAULT);
5261 	}
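	/*
	 * The window-update threshold is a fixed fraction of the socket
	 * receive buffer limit (see SCTP_RWND_HIWAT_SHIFT), but never less
	 * than SCTP_MIN_RWND so small buffers still trigger updates.
	 */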
5262 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5263 	/* Must be at least an MTU's worth */
5264 	if (rwnd_req < SCTP_MIN_RWND)
5265 		rwnd_req = SCTP_MIN_RWND;
5266 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5267 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5268 		sctp_misc_ints(SCTP_SORECV_ENTER,
5269 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5270 	}
5271 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5272 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5273 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5274 	}
5275 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5276 	if (error) {
5277 		goto release_unlocked;
5278 	}
5279 	sockbuf_lock = 1;
5280 restart:
5281 
5282 
5283 restart_nosblocks:
5284 	if (hold_sblock == 0) {
5285 		SOCKBUF_LOCK(&so->so_rcv);
5286 		hold_sblock = 1;
5287 	}
5288 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5289 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5290 		goto out;
5291 	}
5292 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5293 		if (so->so_error) {
5294 			error = so->so_error;
5295 			if ((in_flags & MSG_PEEK) == 0)
5296 				so->so_error = 0;
5297 			goto out;
5298 		} else {
5299 			if (so->so_rcv.sb_cc == 0) {
5300 				/* indicate EOF */
5301 				error = 0;
5302 				goto out;
5303 			}
5304 		}
5305 	}
5306 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5307 		/* we need to wait for data */
5308 		if ((so->so_rcv.sb_cc == 0) &&
5309 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5310 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5311 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5312 				/*
5313 				 * For the active open side, clear the flags
5314 				 * for re-use; the passive open side is
5315 				 * blocked by connect.
5316 				 */
5317 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5318 					/*
5319 					 * You were aborted, passive side
5320 					 * always hits here
5321 					 */
5322 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5323 					error = ECONNRESET;
5324 				}
5325 				so->so_state &= ~(SS_ISCONNECTING |
5326 				    SS_ISDISCONNECTING |
5327 				    SS_ISCONFIRMING |
5328 				    SS_ISCONNECTED);
5329 				if (error == 0) {
5330 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5331 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5332 						error = ENOTCONN;
5333 					}
5334 				}
5335 				goto out;
5336 			}
5337 		}
5338 		error = sbwait(&so->so_rcv);
5339 		if (error) {
5340 			goto out;
5341 		}
5342 		held_length = 0;
5343 		goto restart_nosblocks;
5344 	} else if (so->so_rcv.sb_cc == 0) {
5345 		if (so->so_error) {
5346 			error = so->so_error;
5347 			if ((in_flags & MSG_PEEK) == 0)
5348 				so->so_error = 0;
5349 		} else {
5350 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5351 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5352 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5353 					/*
5354 					 * For the active open side, clear
5355 					 * the flags for re-use; the passive
5356 					 * open side is blocked by connect.
5357 					 */
5358 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5359 						/*
5360 						 * You were aborted, passive
5361 						 * side always hits here
5362 						 */
5363 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5364 						error = ECONNRESET;
5365 					}
5366 					so->so_state &= ~(SS_ISCONNECTING |
5367 					    SS_ISDISCONNECTING |
5368 					    SS_ISCONFIRMING |
5369 					    SS_ISCONNECTED);
5370 					if (error == 0) {
5371 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5372 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5373 							error = ENOTCONN;
5374 						}
5375 					}
5376 					goto out;
5377 				}
5378 			}
5379 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5380 			error = EWOULDBLOCK;
5381 		}
5382 		goto out;
5383 	}
5384 	if (hold_sblock == 1) {
5385 		SOCKBUF_UNLOCK(&so->so_rcv);
5386 		hold_sblock = 0;
5387 	}
5388 	/* we possibly have data we can read */
5389 	/* sa_ignore FREED_MEMORY */
5390 	control = TAILQ_FIRST(&inp->read_queue);
5391 	if (control == NULL) {
5392 		/*
5393 		 * This could be happening since the appender did the
5394 		 * increment but has not yet done the tailq insert onto
5395 		 * the read_queue.
5396 		 */
5397 		if (hold_rlock == 0) {
5398 			SCTP_INP_READ_LOCK(inp);
5399 		}
5400 		control = TAILQ_FIRST(&inp->read_queue);
5401 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5402 #ifdef INVARIANTS
5403 			panic("Huh, its non zero and nothing on control?");
5404 #endif
5405 			so->so_rcv.sb_cc = 0;
5406 		}
5407 		SCTP_INP_READ_UNLOCK(inp);
5408 		hold_rlock = 0;
5409 		goto restart;
5410 	}
5411 	if ((control->length == 0) &&
5412 	    (control->do_not_ref_stcb)) {
5413 		/*
5414 		 * Clean-up code for freeing an assoc that left behind a
5415 		 * pdapi... maybe a peer in EEOR mode that just closed after
5416 		 * sending and never indicated an EOR.
5417 		 */
5418 		if (hold_rlock == 0) {
5419 			hold_rlock = 1;
5420 			SCTP_INP_READ_LOCK(inp);
5421 		}
5422 		control->held_length = 0;
5423 		if (control->data) {
5424 			/* Hmm there is data here .. fix */
5425 			struct mbuf *m_tmp;
5426 			int cnt = 0;
5427 
5428 			m_tmp = control->data;
5429 			while (m_tmp) {
5430 				cnt += SCTP_BUF_LEN(m_tmp);
5431 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5432 					control->tail_mbuf = m_tmp;
5433 					control->end_added = 1;
5434 				}
5435 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5436 			}
5437 			control->length = cnt;
5438 		} else {
5439 			/* remove it */
5440 			TAILQ_REMOVE(&inp->read_queue, control, next);
5441 			/* Add back any hidden data */
5442 			sctp_free_remote_addr(control->whoFrom);
5443 			sctp_free_a_readq(stcb, control);
5444 		}
5445 		if (hold_rlock) {
5446 			hold_rlock = 0;
5447 			SCTP_INP_READ_UNLOCK(inp);
5448 		}
5449 		goto restart;
5450 	}
5451 	if ((control->length == 0) &&
5452 	    (control->end_added == 1)) {
5453 		/*
5454 		 * Do we also need to check for (control->pdapi_aborted ==
5455 		 * 1)?
5456 		 */
5457 		if (hold_rlock == 0) {
5458 			hold_rlock = 1;
5459 			SCTP_INP_READ_LOCK(inp);
5460 		}
5461 		TAILQ_REMOVE(&inp->read_queue, control, next);
5462 		if (control->data) {
5463 #ifdef INVARIANTS
5464 			panic("control->data not null but control->length == 0");
5465 #else
5466 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5467 			sctp_m_freem(control->data);
5468 			control->data = NULL;
5469 #endif
5470 		}
5471 		if (control->aux_data) {
5472 			sctp_m_free(control->aux_data);
5473 			control->aux_data = NULL;
5474 		}
5475 		sctp_free_remote_addr(control->whoFrom);
5476 		sctp_free_a_readq(stcb, control);
5477 		if (hold_rlock) {
5478 			hold_rlock = 0;
5479 			SCTP_INP_READ_UNLOCK(inp);
5480 		}
5481 		goto restart;
5482 	}
5483 	if (control->length == 0) {
5484 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5485 		    (filling_sinfo)) {
5486 			/* find a more suitable one than this */
5487 			ctl = TAILQ_NEXT(control, next);
5488 			while (ctl) {
5489 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5490 				    (ctl->some_taken ||
5491 				    (ctl->spec_flags & M_NOTIFICATION) ||
5492 				    ((ctl->do_not_ref_stcb == 0) &&
5493 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5494 				    ) {
5495 					/*-
5496 					 * If we have a different TCB next, and there is data
5497 					 * present: if we have already taken some (pdapi), OR we can
5498 					 * ref the tcb and no delivery has started on this stream, we
5499 					 * take it. Note we allow a notification on a different
5500 					 * assoc to be delivered.
5501 					 */
5502 					control = ctl;
5503 					goto found_one;
5504 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5505 					    (ctl->length) &&
5506 					    ((ctl->some_taken) ||
5507 					    ((ctl->do_not_ref_stcb == 0) &&
5508 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5509 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5510 					/*-
5511 					 * If we have the same tcb, and there is data present, and we
5512 					 * have the strm interleave feature present: then if we have
5513 					 * taken some (pdapi) or we can refer to that tcb AND we have
5514 					 * not started a delivery for this stream, we can take it.
5515 					 * Note we do NOT allow a notification on the same assoc to
5516 					 * be delivered.
5517 					 */
5518 					control = ctl;
5519 					goto found_one;
5520 				}
5521 				ctl = TAILQ_NEXT(ctl, next);
5522 			}
5523 		}
5524 		/*
5525 		 * if we reach here, no suitable replacement is available
5526 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5527 		 * into our held count, and it's time to sleep again.
5528 		 */
5529 		held_length = so->so_rcv.sb_cc;
5530 		control->held_length = so->so_rcv.sb_cc;
5531 		goto restart;
5532 	}
5533 	/* Clear the held length since there is something to read */
5534 	control->held_length = 0;
5535 	if (hold_rlock) {
5536 		SCTP_INP_READ_UNLOCK(inp);
5537 		hold_rlock = 0;
5538 	}
5539 found_one:
5540 	/*
5541 	 * If we reach here, control has some data for us to read off.
5542 	 * Note that stcb COULD be NULL.
5543 	 */
5544 	control->some_taken++;
5545 	if (hold_sblock) {
5546 		SOCKBUF_UNLOCK(&so->so_rcv);
5547 		hold_sblock = 0;
5548 	}
5549 	stcb = control->stcb;
5550 	if (stcb) {
5551 		if ((control->do_not_ref_stcb == 0) &&
5552 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5553 			if (freecnt_applied == 0)
5554 				stcb = NULL;
5555 		} else if (control->do_not_ref_stcb == 0) {
5556 			/* you can't free it on me please */
5557 			/*
5558 			 * The lock on the socket buffer protects us so the
5559 			 * free code will stop. But since we used the
5560 			 * socketbuf lock and the sender uses the tcb_lock
5561 			 * to increment, we need to use the atomic add to
5562 			 * the refcnt
5563 			 */
5564 			if (freecnt_applied) {
5565 #ifdef INVARIANTS
5566 				panic("refcnt already incremented");
5567 #else
5568 				SCTP_PRINTF("refcnt already incremented?\n");
5569 #endif
5570 			} else {
5571 				atomic_add_int(&stcb->asoc.refcnt, 1);
5572 				freecnt_applied = 1;
5573 			}
5574 			/*
5575 			 * Setup to remember how much we have not yet told
5576 			 * the peer our rwnd has opened up. Note we grab the
5577 			 * value from the tcb from last time. Note too that
5578 			 * sack sending clears this when a sack is sent,
5579 			 * which is fine. Once we hit the rwnd_req, we then
5580 			 * will go to the sctp_user_rcvd() that will not
5581 			 * lock until it KNOWs it MUST send a WUP-SACK.
5582 			 */
5583 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5584 			stcb->freed_by_sorcv_sincelast = 0;
5585 		}
5586 	}
5587 	if (stcb &&
5588 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5589 	    control->do_not_ref_stcb == 0) {
5590 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5591 	}
5592 	/* First let's pull off the sinfo and sockaddr info */
5593 	if ((sinfo) && filling_sinfo) {
5594 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5595 		nxt = TAILQ_NEXT(control, next);
5596 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5597 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5598 			struct sctp_extrcvinfo *s_extra;
5599 
5600 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5601 			if ((nxt) &&
5602 			    (nxt->length)) {
5603 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5604 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5605 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5606 				}
5607 				if (nxt->spec_flags & M_NOTIFICATION) {
5608 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5609 				}
5610 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5611 				s_extra->sreinfo_next_length = nxt->length;
5612 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5613 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5614 				if (nxt->tail_mbuf != NULL) {
5615 					if (nxt->end_added) {
5616 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5617 					}
5618 				}
5619 			} else {
5620 				/*
5621 				 * we explicitly zero this, since the memcpy
5622 				 * picked up other things beyond the older
5623 				 * sinfo_ fields that are on the control
5624 				 * structure :-D
5625 				 */
5626 				nxt = NULL;
5627 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5628 				s_extra->sreinfo_next_aid = 0;
5629 				s_extra->sreinfo_next_length = 0;
5630 				s_extra->sreinfo_next_ppid = 0;
5631 				s_extra->sreinfo_next_stream = 0;
5632 			}
5633 		}
5634 		/*
5635 		 * update off the real current cum-ack, if we have an stcb.
5636 		 */
5637 		if ((control->do_not_ref_stcb == 0) && stcb)
5638 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5639 		/*
5640 		 * mask off the high bits, we keep the actual chunk bits in
5641 		 * there.
5642 		 */
5643 		sinfo->sinfo_flags &= 0x00ff;
5644 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5645 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5646 		}
5647 	}
5648 #ifdef SCTP_ASOCLOG_OF_TSNS
5649 	{
5650 		int index, newindex;
5651 		struct sctp_pcbtsn_rlog *entry;
5652 
5653 		do {
5654 			index = inp->readlog_index;
5655 			newindex = index + 1;
5656 			if (newindex >= SCTP_READ_LOG_SIZE) {
5657 				newindex = 0;
5658 			}
5659 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5660 		entry = &inp->readlog[index];
5661 		entry->vtag = control->sinfo_assoc_id;
5662 		entry->strm = control->sinfo_stream;
5663 		entry->seq = control->sinfo_ssn;
5664 		entry->sz = control->length;
5665 		entry->flgs = control->sinfo_flags;
5666 	}
5667 #endif
5668 	if ((fromlen > 0) && (from != NULL)) {
5669 		union sctp_sockstore store;
5670 		size_t len;
5671 
5672 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5673 #ifdef INET6
5674 		case AF_INET6:
5675 			len = sizeof(struct sockaddr_in6);
5676 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5677 			store.sin6.sin6_port = control->port_from;
5678 			break;
5679 #endif
5680 #ifdef INET
5681 		case AF_INET:
5682 #ifdef INET6
5683 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5684 				len = sizeof(struct sockaddr_in6);
5685 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5686 				    &store.sin6);
5687 				store.sin6.sin6_port = control->port_from;
5688 			} else {
5689 				len = sizeof(struct sockaddr_in);
5690 				store.sin = control->whoFrom->ro._l_addr.sin;
5691 				store.sin.sin_port = control->port_from;
5692 			}
5693 #else
5694 			len = sizeof(struct sockaddr_in);
5695 			store.sin = control->whoFrom->ro._l_addr.sin;
5696 			store.sin.sin_port = control->port_from;
5697 #endif
5698 			break;
5699 #endif
5700 		default:
5701 			len = 0;
5702 			break;
5703 		}
5704 		memcpy(from, &store, min((size_t)fromlen, len));
5705 #ifdef INET6
5706 		{
5707 			struct sockaddr_in6 lsa6, *from6;
5708 
5709 			from6 = (struct sockaddr_in6 *)from;
5710 			sctp_recover_scope_mac(from6, (&lsa6));
5711 		}
5712 #endif
5713 	}
5714 	/* now copy out what data we can */
5715 	if (mp == NULL) {
5716 		/* copy out each mbuf in the chain up to length */
5717 get_more_data:
5718 		m = control->data;
5719 		while (m) {
5720 			/* Move out all we can */
5721 			cp_len = (int)uio->uio_resid;
5722 			my_len = (int)SCTP_BUF_LEN(m);
5723 			if (cp_len > my_len) {
5724 				/* not enough in this buf */
5725 				cp_len = my_len;
5726 			}
5727 			if (hold_rlock) {
5728 				SCTP_INP_READ_UNLOCK(inp);
5729 				hold_rlock = 0;
5730 			}
5731 			if (cp_len > 0)
5732 				error = uiomove(mtod(m, char *), cp_len, uio);
5733 			/* re-read */
5734 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5735 				goto release;
5736 			}
5737 			if ((control->do_not_ref_stcb == 0) && stcb &&
5738 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5739 				no_rcv_needed = 1;
5740 			}
5741 			if (error) {
5742 				/* error we are out of here */
5743 				goto release;
5744 			}
5745 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5746 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5747 			    ((control->end_added == 0) ||
5748 			    (control->end_added &&
5749 			    (TAILQ_NEXT(control, next) == NULL)))
5750 			    ) {
5751 				SCTP_INP_READ_LOCK(inp);
5752 				hold_rlock = 1;
5753 			}
5754 			if (cp_len == SCTP_BUF_LEN(m)) {
5755 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5756 				    (control->end_added)) {
5757 					out_flags |= MSG_EOR;
5758 					if ((control->do_not_ref_stcb == 0) &&
5759 					    (control->stcb != NULL) &&
5760 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5761 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5762 				}
5763 				if (control->spec_flags & M_NOTIFICATION) {
5764 					out_flags |= MSG_NOTIFICATION;
5765 				}
5766 				/* we ate up the mbuf */
5767 				if (in_flags & MSG_PEEK) {
5768 					/* just looking */
5769 					m = SCTP_BUF_NEXT(m);
5770 					copied_so_far += cp_len;
5771 				} else {
5772 					/* dispose of the mbuf */
5773 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5774 						sctp_sblog(&so->so_rcv,
5775 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5776 					}
5777 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5778 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5779 						sctp_sblog(&so->so_rcv,
5780 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5781 					}
5782 					copied_so_far += cp_len;
5783 					freed_so_far += cp_len;
5784 					freed_so_far += MSIZE;
5785 					atomic_subtract_int(&control->length, cp_len);
5786 					control->data = sctp_m_free(m);
5787 					m = control->data;
5788 					/*
5789 					 * been through it all; must hold sb
5790 					 * lock, so ok to null the tail
5791 					 */
5792 					if (control->data == NULL) {
5793 #ifdef INVARIANTS
5794 						if ((control->end_added == 0) ||
5795 						    (TAILQ_NEXT(control, next) == NULL)) {
5796 							/*
5797 							 * If the end is not
5798 							 * added, OR the
5799 							 * next is NOT null
5800 							 * we MUST have the
5801 							 * lock.
5802 							 */
5803 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5804 								panic("Hmm we don't own the lock?");
5805 							}
5806 						}
5807 #endif
5808 						control->tail_mbuf = NULL;
5809 #ifdef INVARIANTS
5810 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5811 							panic("end_added, nothing left and no MSG_EOR");
5812 						}
5813 #endif
5814 					}
5815 				}
5816 			} else {
5817 				/* Do we need to trim the mbuf? */
5818 				if (control->spec_flags & M_NOTIFICATION) {
5819 					out_flags |= MSG_NOTIFICATION;
5820 				}
5821 				if ((in_flags & MSG_PEEK) == 0) {
5822 					SCTP_BUF_RESV_UF(m, cp_len);
5823 					SCTP_BUF_LEN(m) -= cp_len;
5824 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5825 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5826 					}
5827 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5828 					if ((control->do_not_ref_stcb == 0) &&
5829 					    stcb) {
5830 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5831 					}
5832 					copied_so_far += cp_len;
5833 					freed_so_far += cp_len;
5834 					freed_so_far += MSIZE;
5835 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5836 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5837 						    SCTP_LOG_SBRESULT, 0);
5838 					}
5839 					atomic_subtract_int(&control->length, cp_len);
5840 				} else {
5841 					copied_so_far += cp_len;
5842 				}
5843 			}
5844 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5845 				break;
5846 			}
5847 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5848 			    (control->do_not_ref_stcb == 0) &&
5849 			    (freed_so_far >= rwnd_req)) {
5850 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5851 			}
5852 		}		/* end while(m) */
5853 		/*
5854 		 * At this point we have looked at it all and we either have
5855 		 * a MSG_EOR, or read all the user wants... <OR>
5856 		 * control->length == 0.
5857 		 */
5858 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5859 			/* we are done with this control */
5860 			if (control->length == 0) {
5861 				if (control->data) {
5862 #ifdef INVARIANTS
5863 					panic("control->data not null at read eor?");
5864 #else
5865 					SCTP_PRINTF("Strange, data left in the control buffer... invariants would panic?\n");
5866 					sctp_m_freem(control->data);
5867 					control->data = NULL;
5868 #endif
5869 				}
5870 		done_with_control:
5871 				if (TAILQ_NEXT(control, next) == NULL) {
5872 					/*
5873 					 * If we don't have a next we need a
5874 					 * lock; if there is a next, the
5875 					 * interrupt is filling ahead of us
5876 					 * and we don't need a lock to
5877 					 * remove this entry (which is the
5878 					 * head of the queue).
5879 					 */
5880 					if (hold_rlock == 0) {
5881 						SCTP_INP_READ_LOCK(inp);
5882 						hold_rlock = 1;
5883 					}
5884 				}
5885 				TAILQ_REMOVE(&inp->read_queue, control, next);
5886 				/* Add back any hidden data */
5887 				if (control->held_length) {
5888 					held_length = 0;
5889 					control->held_length = 0;
5890 					wakeup_read_socket = 1;
5891 				}
5892 				if (control->aux_data) {
5893 					sctp_m_free(control->aux_data);
5894 					control->aux_data = NULL;
5895 				}
5896 				no_rcv_needed = control->do_not_ref_stcb;
5897 				sctp_free_remote_addr(control->whoFrom);
5898 				control->data = NULL;
5899 				sctp_free_a_readq(stcb, control);
5900 				control = NULL;
5901 				if ((freed_so_far >= rwnd_req) &&
5902 				    (no_rcv_needed == 0))
5903 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5904 
5905 			} else {
5906 				/*
5907 				 * The user did not read all of this
5908 				 * message, turn off the returned MSG_EOR
5909 				 * since we are leaving more behind on the
5910 				 * control to read.
5911 				 */
5912 #ifdef INVARIANTS
5913 				if (control->end_added &&
5914 				    (control->data == NULL) &&
5915 				    (control->tail_mbuf == NULL)) {
5916 					panic("Gak, control->length is corrupt?");
5917 				}
5918 #endif
5919 				no_rcv_needed = control->do_not_ref_stcb;
5920 				out_flags &= ~MSG_EOR;
5921 			}
5922 		}
5923 		if (out_flags & MSG_EOR) {
5924 			goto release;
5925 		}
5926 		if ((uio->uio_resid == 0) ||
5927 		    ((in_eeor_mode) &&
5928 		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5929 			goto release;
5930 		}
5931 		/*
5932 		 * If I hit here the receiver wants more and this message is
5933 		 * NOT done (pd-api). So two questions: can we block? If not,
5934 		 * we are done. Did the user NOT set MSG_WAITALL?
5935 		 */
5936 		if (block_allowed == 0) {
5937 			goto release;
5938 		}
5939 		/*
5940 		 * We need to wait for more data; a few things to note: - We
5941 		 * don't sbunlock() so we don't get someone else reading. - We
5942 		 * must be sure to account for the case where what is added
5943 		 * is NOT to our control when we wakeup.
5944 		 */
5945 
5946 		/*
5947 		 * Do we need to tell the transport a rwnd update might be
5948 		 * needed before we go to sleep?
5949 		 */
5950 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5951 		    ((freed_so_far >= rwnd_req) &&
5952 		    (control->do_not_ref_stcb == 0) &&
5953 		    (no_rcv_needed == 0))) {
5954 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5955 		}
5956 wait_some_more:
5957 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5958 			goto release;
5959 		}
5960 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5961 			goto release;
5962 
5963 		if (hold_rlock == 1) {
5964 			SCTP_INP_READ_UNLOCK(inp);
5965 			hold_rlock = 0;
5966 		}
5967 		if (hold_sblock == 0) {
5968 			SOCKBUF_LOCK(&so->so_rcv);
5969 			hold_sblock = 1;
5970 		}
5971 		if ((copied_so_far) && (control->length == 0) &&
5972 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5973 			goto release;
5974 		}
5975 		if (so->so_rcv.sb_cc <= control->held_length) {
5976 			error = sbwait(&so->so_rcv);
5977 			if (error) {
5978 				goto release;
5979 			}
5980 			control->held_length = 0;
5981 		}
5982 		if (hold_sblock) {
5983 			SOCKBUF_UNLOCK(&so->so_rcv);
5984 			hold_sblock = 0;
5985 		}
5986 		if (control->length == 0) {
5987 			/* still nothing here */
5988 			if (control->end_added == 1) {
5989 			/* peer aborted, or is done, i.e. did a shutdown */
5990 				out_flags |= MSG_EOR;
5991 				if (control->pdapi_aborted) {
5992 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5993 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5994 
5995 					out_flags |= MSG_TRUNC;
5996 				} else {
5997 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5998 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5999 				}
6000 				goto done_with_control;
6001 			}
6002 			if (so->so_rcv.sb_cc > held_length) {
6003 				control->held_length = so->so_rcv.sb_cc;
6004 				held_length = 0;
6005 			}
6006 			goto wait_some_more;
6007 		} else if (control->data == NULL) {
6008 			/*
6009 			 * we must re-sync since data is probably being
6010 			 * added
6011 			 */
6012 			SCTP_INP_READ_LOCK(inp);
6013 			if ((control->length > 0) && (control->data == NULL)) {
6014 				/*
6015 				 * big trouble... we have the lock and it's
6016 				 * corrupt?
6017 				 */
6018 #ifdef INVARIANTS
6019 				panic("Impossible data==NULL length !=0");
6020 #endif
6021 				out_flags |= MSG_EOR;
6022 				out_flags |= MSG_TRUNC;
6023 				control->length = 0;
6024 				SCTP_INP_READ_UNLOCK(inp);
6025 				goto done_with_control;
6026 			}
6027 			SCTP_INP_READ_UNLOCK(inp);
6028 			/* We will fall around to get more data */
6029 		}
6030 		goto get_more_data;
6031 	} else {
6032 		/*-
6033 		 * Give caller back the mbuf chain,
6034 		 * store in uio_resid the length
6035 		 */
6036 		wakeup_read_socket = 0;
6037 		if ((control->end_added == 0) ||
6038 		    (TAILQ_NEXT(control, next) == NULL)) {
6039 			/* Need to get rlock */
6040 			if (hold_rlock == 0) {
6041 				SCTP_INP_READ_LOCK(inp);
6042 				hold_rlock = 1;
6043 			}
6044 		}
6045 		if (control->end_added) {
6046 			out_flags |= MSG_EOR;
6047 			if ((control->do_not_ref_stcb == 0) &&
6048 			    (control->stcb != NULL) &&
6049 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6050 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6051 		}
6052 		if (control->spec_flags & M_NOTIFICATION) {
6053 			out_flags |= MSG_NOTIFICATION;
6054 		}
6055 		uio->uio_resid = control->length;
6056 		*mp = control->data;
6057 		m = control->data;
6058 		while (m) {
6059 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6060 				sctp_sblog(&so->so_rcv,
6061 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6062 			}
6063 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6064 			freed_so_far += SCTP_BUF_LEN(m);
6065 			freed_so_far += MSIZE;
6066 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6067 				sctp_sblog(&so->so_rcv,
6068 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6069 			}
6070 			m = SCTP_BUF_NEXT(m);
6071 		}
6072 		control->data = control->tail_mbuf = NULL;
6073 		control->length = 0;
6074 		if (out_flags & MSG_EOR) {
6075 			/* Done with this control */
6076 			goto done_with_control;
6077 		}
6078 	}
6079 release:
6080 	if (hold_rlock == 1) {
6081 		SCTP_INP_READ_UNLOCK(inp);
6082 		hold_rlock = 0;
6083 	}
6084 	if (hold_sblock == 1) {
6085 		SOCKBUF_UNLOCK(&so->so_rcv);
6086 		hold_sblock = 0;
6087 	}
6088 	sbunlock(&so->so_rcv);
6089 	sockbuf_lock = 0;
6090 
6091 release_unlocked:
6092 	if (hold_sblock) {
6093 		SOCKBUF_UNLOCK(&so->so_rcv);
6094 		hold_sblock = 0;
6095 	}
6096 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6097 		if ((freed_so_far >= rwnd_req) &&
6098 		    (control && (control->do_not_ref_stcb == 0)) &&
6099 		    (no_rcv_needed == 0))
6100 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6101 	}
6102 out:
6103 	if (msg_flags) {
6104 		*msg_flags = out_flags;
6105 	}
6106 	if (((out_flags & MSG_EOR) == 0) &&
6107 	    ((in_flags & MSG_PEEK) == 0) &&
6108 	    (sinfo) &&
6109 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6110 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6111 		struct sctp_extrcvinfo *s_extra;
6112 
6113 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6114 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6115 	}
6116 	if (hold_rlock == 1) {
6117 		SCTP_INP_READ_UNLOCK(inp);
6118 	}
6119 	if (hold_sblock) {
6120 		SOCKBUF_UNLOCK(&so->so_rcv);
6121 	}
6122 	if (sockbuf_lock) {
6123 		sbunlock(&so->so_rcv);
6124 	}
6125 	if (freecnt_applied) {
6126 		/*
6127 		 * The lock on the socket buffer protects us so the free
6128 		 * code will stop. But since we used the socketbuf lock and
6129 		 * the sender uses the tcb_lock to increment, we need to use
6130 		 * the atomic add to the refcnt.
6131 		 */
6132 		if (stcb == NULL) {
6133 #ifdef INVARIANTS
6134 			panic("stcb for refcnt has gone NULL?");
6135 			goto stage_left;
6136 #else
6137 			goto stage_left;
6138 #endif
6139 		}
6140 		atomic_add_int(&stcb->asoc.refcnt, -1);
6141 		/* Save the value back for next time */
6142 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6143 	}
6144 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6145 		if (stcb) {
6146 			sctp_misc_ints(SCTP_SORECV_DONE,
6147 			    freed_so_far,
6148 			    ((uio) ? (slen - uio->uio_resid) : slen),
6149 			    stcb->asoc.my_rwnd,
6150 			    so->so_rcv.sb_cc);
6151 		} else {
6152 			sctp_misc_ints(SCTP_SORECV_DONE,
6153 			    freed_so_far,
6154 			    ((uio) ? (slen - uio->uio_resid) : slen),
6155 			    0,
6156 			    so->so_rcv.sb_cc);
6157 		}
6158 	}
6159 stage_left:
6160 	if (wakeup_read_socket) {
6161 		sctp_sorwakeup(inp, so);
6162 	}
6163 	return (error);
6164 }
6165 
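/*
 * Illustrative sketch (not part of this file) of how the MSG_NOTIFICATION
 * and MSG_EOR semantics implemented above surface to userland through the
 * sctp_recvmsg(3) wrapper; the names 'fd', 'buf', 'handle_event' and
 * 'read_more' below are placeholders for this example only:
 *
 *	char buf[4096];
 *	struct sctp_sndrcvinfo sinfo;
 *	int flags = 0;
 *	ssize_t n;
 *
 *	n = sctp_recvmsg(fd, buf, sizeof(buf), NULL, NULL, &sinfo, &flags);
 *	if (n > 0 && (flags & MSG_NOTIFICATION))
 *		handle_event(buf);	(buf holds a union sctp_notification)
 *	else if (n > 0 && (flags & MSG_EOR) == 0)
 *		read_more();		(partial delivery; call again for the rest)
 */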
6166 
6167 #ifdef SCTP_MBUF_LOGGING
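/*
 * Logging wrappers around m_free()/m_freem(); compiled only when
 * SCTP_MBUF_LOGGING is enabled.
 */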
6168 struct mbuf *
6169 sctp_m_free(struct mbuf *m)
6170 {
6171 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6172 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6173 	}
6174 	return (m_free(m));
6175 }
6176 
6177 void
6178 sctp_m_freem(struct mbuf *mb)
6179 {
6180 	while (mb != NULL)
6181 		mb = sctp_m_free(mb);
6182 }
6183 
6184 #endif
6185 
6186 int
6187 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6188 {
6189 	/*
6190 	 * Given a local address. For all associations that holds the
6191 	 * address, request a peer-set-primary.
6192 	 */
6193 	struct sctp_ifa *ifa;
6194 	struct sctp_laddr *wi;
6195 
6196 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6197 	if (ifa == NULL) {
6198 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6199 		return (EADDRNOTAVAIL);
6200 	}
6201 	/*
6202 	 * Now that we have the ifa we must awaken the iterator with this
6203 	 * message.
6204 	 */
6205 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6206 	if (wi == NULL) {
6207 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6208 		return (ENOMEM);
6209 	}
6210 	/* Now incr the count and init the wi structure */
6211 	SCTP_INCR_LADDR_COUNT();
6212 	bzero(wi, sizeof(*wi));
6213 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6214 	wi->ifa = ifa;
6215 	wi->action = SCTP_SET_PRIM_ADDR;
6216 	atomic_add_int(&ifa->refcount, 1);
6217 
6218 	/* Now add it to the work queue */
6219 	SCTP_WQ_ADDR_LOCK();
6220 	/*
6221 	 * Should this really be a tailq? As it is we will process the
6222 	 * newest first :-0
6223 	 */
6224 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6225 	SCTP_WQ_ADDR_UNLOCK();
6226 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6227 	    (struct sctp_inpcb *)NULL,
6228 	    (struct sctp_tcb *)NULL,
6229 	    (struct sctp_nets *)NULL);
6230 	return (0);
6231 }
6232 
6233 
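/*
 * Socket-layer receive entry point for SCTP.  Wraps sctp_sorecvmsg(),
 * converts the returned sctp_sndrcvinfo into a control message via
 * sctp_build_ctl_nchunk() when the application asked for it, and hands
 * back a copy of the peer address when 'psa' is non-NULL.
 */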
6234 int
6235 sctp_soreceive(struct socket *so,
6236     struct sockaddr **psa,
6237     struct uio *uio,
6238     struct mbuf **mp0,
6239     struct mbuf **controlp,
6240     int *flagsp)
6241 {
6242 	int error, fromlen;
6243 	uint8_t sockbuf[256];
6244 	struct sockaddr *from;
6245 	struct sctp_extrcvinfo sinfo;
6246 	int filling_sinfo = 1;
6247 	struct sctp_inpcb *inp;
6248 
6249 	inp = (struct sctp_inpcb *)so->so_pcb;
6250 	/* pickup the assoc we are reading from */
6251 	if (inp == NULL) {
6252 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6253 		return (EINVAL);
6254 	}
6255 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6256 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6257 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6258 	    (controlp == NULL)) {
6259 		/* user does not want the sndrcv ctl */
6260 		filling_sinfo = 0;
6261 	}
6262 	if (psa) {
6263 		from = (struct sockaddr *)sockbuf;
6264 		fromlen = sizeof(sockbuf);
6265 		from->sa_len = 0;
6266 	} else {
6267 		from = NULL;
6268 		fromlen = 0;
6269 	}
6270 
6271 	if (filling_sinfo) {
6272 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6273 	}
6274 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6275 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6276 	if (controlp != NULL) {
6277 		/* copy back the sinfo in a CMSG format */
6278 		if (filling_sinfo)
6279 			*controlp = sctp_build_ctl_nchunk(inp,
6280 			    (struct sctp_sndrcvinfo *)&sinfo);
6281 		else
6282 			*controlp = NULL;
6283 	}
6284 	if (psa) {
6285 		/* copy back the address info */
6286 		if (from && from->sa_len) {
6287 			*psa = sodupsockaddr(from, M_NOWAIT);
6288 		} else {
6289 			*psa = NULL;
6290 		}
6291 	}
6292 	return (error);
6293 }
6294 
6295 
6296 
6297 
6298 
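/*
 * Helper for sctp_connectx(): walk a packed list of 'totaddr' sockaddrs
 * and add each one as a remote address of the association.  On any invalid
 * or unaddable address the association is freed and *error is set; the
 * number of addresses actually added is returned.
 */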
6299 int
6300 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6301     int totaddr, int *error)
6302 {
6303 	int added = 0;
6304 	int i;
6305 	struct sctp_inpcb *inp;
6306 	struct sockaddr *sa;
6307 	size_t incr = 0;
6308 
6309 #ifdef INET
6310 	struct sockaddr_in *sin;
6311 
6312 #endif
6313 #ifdef INET6
6314 	struct sockaddr_in6 *sin6;
6315 
6316 #endif
6317 
6318 	sa = addr;
6319 	inp = stcb->sctp_ep;
6320 	*error = 0;
6321 	for (i = 0; i < totaddr; i++) {
6322 		switch (sa->sa_family) {
6323 #ifdef INET
6324 		case AF_INET:
6325 			incr = sizeof(struct sockaddr_in);
6326 			sin = (struct sockaddr_in *)sa;
6327 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6328 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6329 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6330 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6331 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6332 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6333 				*error = EINVAL;
6334 				goto out_now;
6335 			}
6336 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6337 				/* assoc is gone; no unlock needed */
6338 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6339 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6340 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6341 				*error = ENOBUFS;
6342 				goto out_now;
6343 			}
6344 			added++;
6345 			break;
6346 #endif
6347 #ifdef INET6
6348 		case AF_INET6:
6349 			incr = sizeof(struct sockaddr_in6);
6350 			sin6 = (struct sockaddr_in6 *)sa;
6351 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6352 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6353 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6354 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6355 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6356 				*error = EINVAL;
6357 				goto out_now;
6358 			}
6359 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6360 				/* assoc is gone; no unlock needed */
6361 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6362 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6363 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6364 				*error = ENOBUFS;
6365 				goto out_now;
6366 			}
6367 			added++;
6368 			break;
6369 #endif
6370 		default:
6371 			break;
6372 		}
6373 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6374 	}
6375 out_now:
6376 	return (added);
6377 }
6378 
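/*
 * Helper for sctp_connectx(): validate and count the packed address list
 * (updating *num_v4/*num_v6) and return an existing association if any of
 * the addresses already maps to one; otherwise return NULL.  On a malformed
 * address *error and *bad_addr are set.
 */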
6379 struct sctp_tcb *
6380 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6381     int *totaddr, int *num_v4, int *num_v6, int *error,
6382     int limit, int *bad_addr)
6383 {
6384 	struct sockaddr *sa;
6385 	struct sctp_tcb *stcb = NULL;
6386 	size_t incr, at, i;
6387 
6388 	at = incr = 0;
6389 	sa = addr;
6390 
6391 	*error = *num_v6 = *num_v4 = 0;
6392 	/* account and validate addresses */
6393 	for (i = 0; i < (size_t)*totaddr; i++) {
6394 		switch (sa->sa_family) {
6395 #ifdef INET
6396 		case AF_INET:
6397 			(*num_v4) += 1;
6398 			incr = sizeof(struct sockaddr_in);
6399 			if (sa->sa_len != incr) {
6400 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6401 				*error = EINVAL;
6402 				*bad_addr = 1;
6403 				return (NULL);
6404 			}
6405 			break;
6406 #endif
6407 #ifdef INET6
6408 		case AF_INET6:
6409 			{
6410 				struct sockaddr_in6 *sin6;
6411 
6412 				sin6 = (struct sockaddr_in6 *)sa;
6413 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6414 					/* Must be non-mapped for connectx */
6415 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6416 					*error = EINVAL;
6417 					*bad_addr = 1;
6418 					return (NULL);
6419 				}
6420 				(*num_v6) += 1;
6421 				incr = sizeof(struct sockaddr_in6);
6422 				if (sa->sa_len != incr) {
6423 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6424 					*error = EINVAL;
6425 					*bad_addr = 1;
6426 					return (NULL);
6427 				}
6428 				break;
6429 			}
6430 #endif
6431 		default:
6432 			*totaddr = i;
6433 			/* we are done */
6434 			break;
6435 		}
6436 		if (i == (size_t)*totaddr) {
6437 			break;
6438 		}
6439 		SCTP_INP_INCR_REF(inp);
6440 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6441 		if (stcb != NULL) {
6442 			/* Already have, or am bringing up, an association */
6443 			return (stcb);
6444 		} else {
6445 			SCTP_INP_DECR_REF(inp);
6446 		}
6447 		if ((at + incr) > (size_t)limit) {
6448 			*totaddr = i;
6449 			break;
6450 		}
6451 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6452 	}
6453 	return ((struct sctp_tcb *)NULL);
6454 }
6455 
6456 /*
6457  * sctp_bindx(ADD) for one address.
6458  * assumes all arguments are valid/checked by caller.
6459  */
6460 void
6461 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6462     struct sockaddr *sa, sctp_assoc_t assoc_id,
6463     uint32_t vrf_id, int *error, void *p)
6464 {
6465 	struct sockaddr *addr_touse;
6466 
6467 #if defined(INET) && defined(INET6)
6468 	struct sockaddr_in sin;
6469 
6470 #endif
6471 
6472 	/* see if we're bound all already! */
6473 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6474 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6475 		*error = EINVAL;
6476 		return;
6477 	}
6478 	addr_touse = sa;
6479 #ifdef INET6
6480 	if (sa->sa_family == AF_INET6) {
6481 #ifdef INET
6482 		struct sockaddr_in6 *sin6;
6483 
6484 #endif
6485 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6486 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6487 			*error = EINVAL;
6488 			return;
6489 		}
6490 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6491 			/* can only bind v6 on PF_INET6 sockets */
6492 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6493 			*error = EINVAL;
6494 			return;
6495 		}
6496 #ifdef INET
6497 		sin6 = (struct sockaddr_in6 *)addr_touse;
6498 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6499 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6500 			    SCTP_IPV6_V6ONLY(inp)) {
6501 				/* can't bind v4-mapped on PF_INET sockets */
6502 				/* can't bind v4-mapped addrs on v6-only sockets */
6503 				*error = EINVAL;
6504 				return;
6505 			}
6506 			in6_sin6_2_sin(&sin, sin6);
6507 			addr_touse = (struct sockaddr *)&sin;
6508 		}
6509 #endif
6510 	}
6511 #endif
6512 #ifdef INET
6513 	if (sa->sa_family == AF_INET) {
6514 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6515 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6516 			*error = EINVAL;
6517 			return;
6518 		}
6519 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6520 		    SCTP_IPV6_V6ONLY(inp)) {
6521 			/* can't bind v4 addrs on v6-only sockets */
6522 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6523 			*error = EINVAL;
6524 			return;
6525 		}
6526 	}
6527 #endif
6528 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6529 		if (p == NULL) {
6530 			/* Can't get proc for Net/Open BSD */
6531 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6532 			*error = EINVAL;
6533 			return;
6534 		}
6535 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6536 		return;
6537 	}
6538 	/*
6539 	 * No locks required here since bind and mgmt_ep_sa all do their own
6540 	 * locking. If we do something for the FIX: below we may need to
6541 	 * lock in that case.
6542 	 */
6543 	if (assoc_id == 0) {
6544 		/* add the address */
6545 		struct sctp_inpcb *lep;
6546 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6547 
6548 		/* validate the incoming port */
6549 		if ((lsin->sin_port != 0) &&
6550 		    (lsin->sin_port != inp->sctp_lport)) {
6551 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6552 			*error = EINVAL;
6553 			return;
6554 		} else {
6555 			/* user specified 0 port, set it to existing port */
6556 			lsin->sin_port = inp->sctp_lport;
6557 		}
6558 
6559 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6560 		if (lep != NULL) {
6561 			/*
6562 			 * We must decrement the refcount since we have the
6563 			 * ep already and are binding. No remove going on
6564 			 * here.
6565 			 */
6566 			SCTP_INP_DECR_REF(lep);
6567 		}
6568 		if (lep == inp) {
6569 			/* already bound to it.. ok */
6570 			return;
6571 		} else if (lep == NULL) {
6572 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6573 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6574 			    SCTP_ADD_IP_ADDRESS,
6575 			    vrf_id, NULL);
6576 		} else {
6577 			*error = EADDRINUSE;
6578 		}
6579 		if (*error)
6580 			return;
6581 	} else {
6582 		/*
6583 		 * FIX: decide whether we allow assoc based bindx
6584 		 */
6585 	}
6586 }
6587 
6588 /*
6589  * sctp_bindx(DELETE) for one address.
6590  * assumes all arguments are valid/checked by caller.
6591  */
6592 void
6593 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6594     struct sockaddr *sa, sctp_assoc_t assoc_id,
6595     uint32_t vrf_id, int *error)
6596 {
6597 	struct sockaddr *addr_touse;
6598 
6599 #if defined(INET) && defined(INET6)
6600 	struct sockaddr_in sin;
6601 
6602 #endif
6603 
6604 	/* see if we're bound all already! */
6605 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6606 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6607 		*error = EINVAL;
6608 		return;
6609 	}
6610 	addr_touse = sa;
6611 #ifdef INET6
6612 	if (sa->sa_family == AF_INET6) {
6613 #ifdef INET
6614 		struct sockaddr_in6 *sin6;
6615 
6616 #endif
6617 
6618 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6619 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6620 			*error = EINVAL;
6621 			return;
6622 		}
6623 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6624 			/* can only bind v6 on PF_INET6 sockets */
6625 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6626 			*error = EINVAL;
6627 			return;
6628 		}
6629 #ifdef INET
6630 		sin6 = (struct sockaddr_in6 *)addr_touse;
6631 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6632 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6633 			    SCTP_IPV6_V6ONLY(inp)) {
6634 				/* can't bind v4-mapped addrs on v6-only sockets */
6635 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6636 				*error = EINVAL;
6637 				return;
6638 			}
6639 			in6_sin6_2_sin(&sin, sin6);
6640 			addr_touse = (struct sockaddr *)&sin;
6641 		}
6642 #endif
6643 	}
6644 #endif
6645 #ifdef INET
6646 	if (sa->sa_family == AF_INET) {
6647 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6648 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6649 			*error = EINVAL;
6650 			return;
6651 		}
6652 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6653 		    SCTP_IPV6_V6ONLY(inp)) {
6654 			/* can't bind v4 addrs on v6-only sockets */
6655 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6656 			*error = EINVAL;
6657 			return;
6658 		}
6659 	}
6660 #endif
6661 	/*
6662 	 * No lock required; mgmt_ep_sa does its own locking. If the FIX:
6663 	 * below is ever changed we may need to lock before calling
6664 	 * association level binding.
6665 	 */
6666 	if (assoc_id == 0) {
6667 		/* delete the address */
6668 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6669 		    SCTP_DEL_IP_ADDRESS,
6670 		    vrf_id, NULL);
6671 	} else {
6672 		/*
6673 		 * FIX: decide whether we allow assoc based bindx
6674 		 */
6675 	}
6676 }
6677 
6678 /*
6679  * returns the valid local address count for an assoc, taking into account
6680  * all scoping rules
6681  */
6682 int
6683 sctp_local_addr_count(struct sctp_tcb *stcb)
6684 {
6685 	int loopback_scope;
6686 
6687 #if defined(INET)
6688 	int ipv4_local_scope, ipv4_addr_legal;
6689 
6690 #endif
6691 #if defined (INET6)
6692 	int local_scope, site_scope, ipv6_addr_legal;
6693 
6694 #endif
6695 	struct sctp_vrf *vrf;
6696 	struct sctp_ifn *sctp_ifn;
6697 	struct sctp_ifa *sctp_ifa;
6698 	int count = 0;
6699 
6700 	/* Turn on all the appropriate scopes */
6701 	loopback_scope = stcb->asoc.scope.loopback_scope;
6702 #if defined(INET)
6703 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6704 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6705 #endif
6706 #if defined(INET6)
6707 	local_scope = stcb->asoc.scope.local_scope;
6708 	site_scope = stcb->asoc.scope.site_scope;
6709 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6710 #endif
6711 	SCTP_IPI_ADDR_RLOCK();
6712 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6713 	if (vrf == NULL) {
6714 		/* no vrf, no addresses */
6715 		SCTP_IPI_ADDR_RUNLOCK();
6716 		return (0);
6717 	}
6718 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6719 		/*
6720 		 * bound all case: go through all ifns on the vrf
6721 		 */
6722 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6723 			if ((loopback_scope == 0) &&
6724 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6725 				continue;
6726 			}
6727 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6728 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6729 					continue;
6730 				switch (sctp_ifa->address.sa.sa_family) {
6731 #ifdef INET
6732 				case AF_INET:
6733 					if (ipv4_addr_legal) {
6734 						struct sockaddr_in *sin;
6735 
6736 						sin = &sctp_ifa->address.sin;
6737 						if (sin->sin_addr.s_addr == 0) {
6738 							/*
6739 							 * skip unspecified
6740 							 * addrs
6741 							 */
6742 							continue;
6743 						}
6744 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6745 						    &sin->sin_addr) != 0) {
6746 							continue;
6747 						}
6748 						if ((ipv4_local_scope == 0) &&
6749 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6750 							continue;
6751 						}
6752 						/* count this one */
6753 						count++;
6754 					} else {
6755 						continue;
6756 					}
6757 					break;
6758 #endif
6759 #ifdef INET6
6760 				case AF_INET6:
6761 					if (ipv6_addr_legal) {
6762 						struct sockaddr_in6 *sin6;
6763 
6764 						sin6 = &sctp_ifa->address.sin6;
6765 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6766 							continue;
6767 						}
6768 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6769 						    &sin6->sin6_addr) != 0) {
6770 							continue;
6771 						}
6772 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6773 							if (local_scope == 0)
6774 								continue;
6775 							if (sin6->sin6_scope_id == 0) {
6776 								if (sa6_recoverscope(sin6) != 0)
6777 									/*
6778 									 * bad link
6779 									 * local
6780 									 * address
6781 									 */
6791 									continue;
6792 							}
6793 						}
6794 						if ((site_scope == 0) &&
6795 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6796 							continue;
6797 						}
6798 						/* count this one */
6799 						count++;
6800 					}
6801 					break;
6802 #endif
6803 				default:
6804 					/* TSNH */
6805 					break;
6806 				}
6807 			}
6808 		}
6809 	} else {
6810 		/*
6811 		 * subset bound case
6812 		 */
6813 		struct sctp_laddr *laddr;
6814 
6815 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6816 		    sctp_nxt_addr) {
6817 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6818 				continue;
6819 			}
6820 			/* count this one */
6821 			count++;
6822 		}
6823 	}
6824 	SCTP_IPI_ADDR_RUNLOCK();
6825 	return (count);
6826 }
6827 
6828 #if defined(SCTP_LOCAL_TRACE_BUF)
6829 
6830 void
6831 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6832 {
6833 	uint32_t saveindex, newindex;
6834 
6835 	do {
6836 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6837 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6838 			newindex = 1;
6839 		} else {
6840 			newindex = saveindex + 1;
6841 		}
6842 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6843 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6844 		saveindex = 0;
6845 	}
6846 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6847 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6848 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6849 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6850 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6851 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6852 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6853 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6854 }
6855 
6856 #endif
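
/*
 * Receive hook for SCTP-over-UDP encapsulation: given a UDP datagram that
 * arrived on a tunneling socket, strip the UDP header, fix up the IP
 * length field and re-inject the packet into the normal SCTP input path,
 * remembering the encapsulating source port.
 */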
6857 static void
6858 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored,
6859     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6860 {
6861 	struct ip *iph;
6862 
6863 #ifdef INET6
6864 	struct ip6_hdr *ip6;
6865 
6866 #endif
6867 	struct mbuf *sp, *last;
6868 	struct udphdr *uhdr;
6869 	uint16_t port;
6870 
6871 	if ((m->m_flags & M_PKTHDR) == 0) {
6872 		/* Can't handle one that is not a pkt hdr */
6873 		goto out;
6874 	}
6875 	/* Pull the src port */
6876 	iph = mtod(m, struct ip *);
6877 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6878 	port = uhdr->uh_sport;
6879 	/*
6880 	 * Split out the mbuf chain. Leave the IP header in m, place the
6881 	 * rest in the sp.
6882 	 */
6883 	sp = m_split(m, off, M_NOWAIT);
6884 	if (sp == NULL) {
6885 		/* Gak, drop packet, we can't do a split */
6886 		goto out;
6887 	}
6888 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6889 		/* Gak, packet can't have an SCTP header in it - too small */
6890 		m_freem(sp);
6891 		goto out;
6892 	}
6893 	/* Now pull up the UDP header and SCTP header together */
6894 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6895 	if (sp == NULL) {
6896 		/* Gak pullup failed */
6897 		goto out;
6898 	}
6899 	/* Trim out the UDP header */
6900 	m_adj(sp, sizeof(struct udphdr));
6901 
6902 	/* Now reconstruct the mbuf chain */
6903 	for (last = m; last->m_next; last = last->m_next);
6904 	last->m_next = sp;
6905 	m->m_pkthdr.len += sp->m_pkthdr.len;
6906 	iph = mtod(m, struct ip *);
6907 	switch (iph->ip_v) {
6908 #ifdef INET
6909 	case IPVERSION:
6910 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6911 		sctp_input_with_port(m, off, port);
6912 		break;
6913 #endif
6914 #ifdef INET6
6915 	case IPV6_VERSION >> 4:
6916 		ip6 = mtod(m, struct ip6_hdr *);
6917 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6918 		sctp6_input_with_port(&m, &off, port);
6919 		break;
6920 #endif
6921 	default:
6922 		goto out;
6923 		break;
6924 	}
6925 	return;
6926 out:
6927 	m_freem(m);
6928 }
6929 
6930 void
6931 sctp_over_udp_stop(void)
6932 {
6933 	/*
6934 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6935 	 * for writing!
6936 	 */
6937 #ifdef INET
6938 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
6939 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
6940 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
6941 	}
6942 #endif
6943 #ifdef INET6
6944 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
6945 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
6946 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
6947 	}
6948 #endif
6949 }
6950 
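/*
 * Bring up the SCTP-over-UDP tunneling sockets on the currently configured
 * tunneling port (one socket per address family) and attach
 * sctp_recv_udp_tunneled_packet() as the kernel tunneling callback.
 * Typically reached from the sysctl handler that changes the tunneling port.
 */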
6951 int
6952 sctp_over_udp_start(void)
6953 {
6954 	uint16_t port;
6955 	int ret;
6956 
6957 #ifdef INET
6958 	struct sockaddr_in sin;
6959 
6960 #endif
6961 #ifdef INET6
6962 	struct sockaddr_in6 sin6;
6963 
6964 #endif
6965 	/*
6966 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6967 	 * for writing!
6968 	 */
6969 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
6970 	if (ntohs(port) == 0) {
6971 		/* Must have a port set */
6972 		return (EINVAL);
6973 	}
6974 #ifdef INET
6975 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
6976 		/* Already running -- must stop first */
6977 		return (EALREADY);
6978 	}
6979 #endif
6980 #ifdef INET6
6981 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
6982 		/* Already running -- must stop first */
6983 		return (EALREADY);
6984 	}
6985 #endif
6986 #ifdef INET
6987 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
6988 	    SOCK_DGRAM, IPPROTO_UDP,
6989 	    curthread->td_ucred, curthread))) {
6990 		sctp_over_udp_stop();
6991 		return (ret);
6992 	}
6993 	/* Call the special UDP hook. */
6994 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
6995 	    sctp_recv_udp_tunneled_packet, NULL))) {
6996 		sctp_over_udp_stop();
6997 		return (ret);
6998 	}
6999 	/* Ok, we have a socket, bind it to the port. */
7000 	memset(&sin, 0, sizeof(struct sockaddr_in));
7001 	sin.sin_len = sizeof(struct sockaddr_in);
7002 	sin.sin_family = AF_INET;
7003 	sin.sin_port = htons(port);
7004 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7005 	    (struct sockaddr *)&sin, curthread))) {
7006 		sctp_over_udp_stop();
7007 		return (ret);
7008 	}
7009 #endif
7010 #ifdef INET6
7011 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7012 	    SOCK_DGRAM, IPPROTO_UDP,
7013 	    curthread->td_ucred, curthread))) {
7014 		sctp_over_udp_stop();
7015 		return (ret);
7016 	}
7017 	/* Call the special UDP hook. */
7018 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7019 	    sctp_recv_udp_tunneled_packet, NULL))) {
7020 		sctp_over_udp_stop();
7021 		return (ret);
7022 	}
7023 	/* Ok, we have a socket, bind it to the port. */
7024 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7025 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7026 	sin6.sin6_family = AF_INET6;
7027 	sin6.sin6_port = htons(port);
7028 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7029 	    (struct sockaddr *)&sin6, curthread))) {
7030 		sctp_over_udp_stop();
7031 		return (ret);
7032 	}
7033 #endif
7034 	return (0);
7035 }
7036