xref: /freebsd/sys/netinet/sctputil.c (revision 9fe48b8076ae9c6dc486d713c468ff03ec056eda)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>	/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55 
56 
57 #ifndef KTR_SCTP
58 #define KTR_SCTP KTR_SUBSYS
59 #endif
60 
61 extern struct sctp_cc_functions sctp_cc_functions[];
62 extern struct sctp_ss_functions sctp_ss_functions[];
63 
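/*
 * The sctp_*log()/rto_logging() functions below all follow the same
 * pattern: fill in the relevant arm of the sctp_clog.x union and hand the
 * four packed words (x.misc.log1 .. x.misc.log4) to SCTP_CTR6(), tagged
 * with an event class and a "from" location code, so the records land in
 * the KTR trace stream under KTR_SCTP.
 */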
64 void
65 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
66 {
67 	struct sctp_cwnd_log sctp_clog;
68 
69 	sctp_clog.x.sb.stcb = stcb;
70 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
71 	if (stcb)
72 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
73 	else
74 		sctp_clog.x.sb.stcb_sbcc = 0;
75 	sctp_clog.x.sb.incr = incr;
76 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
77 	    SCTP_LOG_EVENT_SB,
78 	    from,
79 	    sctp_clog.x.misc.log1,
80 	    sctp_clog.x.misc.log2,
81 	    sctp_clog.x.misc.log3,
82 	    sctp_clog.x.misc.log4);
83 }
84 
85 void
86 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
87 {
88 	struct sctp_cwnd_log sctp_clog;
89 
90 	sctp_clog.x.close.inp = (void *)inp;
91 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
92 	if (stcb) {
93 		sctp_clog.x.close.stcb = (void *)stcb;
94 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
95 	} else {
96 		sctp_clog.x.close.stcb = 0;
97 		sctp_clog.x.close.state = 0;
98 	}
99 	sctp_clog.x.close.loc = loc;
100 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
101 	    SCTP_LOG_EVENT_CLOSE,
102 	    0,
103 	    sctp_clog.x.misc.log1,
104 	    sctp_clog.x.misc.log2,
105 	    sctp_clog.x.misc.log3,
106 	    sctp_clog.x.misc.log4);
107 }
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 }
125 
126 void
127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128 {
129 	struct sctp_cwnd_log sctp_clog;
130 
131 	sctp_clog.x.strlog.stcb = stcb;
132 	sctp_clog.x.strlog.n_tsn = tsn;
133 	sctp_clog.x.strlog.n_sseq = sseq;
134 	sctp_clog.x.strlog.e_tsn = 0;
135 	sctp_clog.x.strlog.e_sseq = 0;
136 	sctp_clog.x.strlog.strm = stream;
137 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138 	    SCTP_LOG_EVENT_STRM,
139 	    from,
140 	    sctp_clog.x.misc.log1,
141 	    sctp_clog.x.misc.log2,
142 	    sctp_clog.x.misc.log3,
143 	    sctp_clog.x.misc.log4);
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 void
166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167 {
168 	struct sctp_cwnd_log sctp_clog;
169 
170 	sctp_clog.x.sack.cumack = cumack;
171 	sctp_clog.x.sack.oldcumack = old_cumack;
172 	sctp_clog.x.sack.tsn = tsn;
173 	sctp_clog.x.sack.numGaps = gaps;
174 	sctp_clog.x.sack.numDups = dups;
175 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176 	    SCTP_LOG_EVENT_SACK,
177 	    from,
178 	    sctp_clog.x.misc.log1,
179 	    sctp_clog.x.misc.log2,
180 	    sctp_clog.x.misc.log3,
181 	    sctp_clog.x.misc.log4);
182 }
183 
184 void
185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186 {
187 	struct sctp_cwnd_log sctp_clog;
188 
189 	memset(&sctp_clog, 0, sizeof(sctp_clog));
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
204 {
205 	struct sctp_cwnd_log sctp_clog;
206 
207 	memset(&sctp_clog, 0, sizeof(sctp_clog));
208 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210 	sctp_clog.x.fr.tsn = tsn;
211 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212 	    SCTP_LOG_EVENT_FR,
213 	    from,
214 	    sctp_clog.x.misc.log1,
215 	    sctp_clog.x.misc.log2,
216 	    sctp_clog.x.misc.log3,
217 	    sctp_clog.x.misc.log4);
218 }
219 
220 #ifdef SCTP_MBUF_LOGGING
221 void
222 sctp_log_mb(struct mbuf *m, int from)
223 {
224 	struct sctp_cwnd_log sctp_clog;
225 
226 	sctp_clog.x.mb.mp = m;
227 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
228 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
229 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
230 	if (SCTP_BUF_IS_EXTENDED(m)) {
231 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
232 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
233 	} else {
234 		sctp_clog.x.mb.ext = 0;
235 		sctp_clog.x.mb.refcnt = 0;
236 	}
237 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
238 	    SCTP_LOG_EVENT_MBUF,
239 	    from,
240 	    sctp_clog.x.misc.log1,
241 	    sctp_clog.x.misc.log2,
242 	    sctp_clog.x.misc.log3,
243 	    sctp_clog.x.misc.log4);
244 }
245 
246 void
247 sctp_log_mbc(struct mbuf *m, int from)
248 {
249 	struct mbuf *mat;
250 
251 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
252 		sctp_log_mb(mat, from);
253 	}
254 }
255 
256 #endif
257 
258 void
259 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
260 {
261 	struct sctp_cwnd_log sctp_clog;
262 
263 	if (control == NULL) {
264 		SCTP_PRINTF("Gak log of NULL?\n");
265 		return;
266 	}
267 	sctp_clog.x.strlog.stcb = control->stcb;
268 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
269 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
270 	sctp_clog.x.strlog.strm = control->sinfo_stream;
271 	if (poschk != NULL) {
272 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
273 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
274 	} else {
275 		sctp_clog.x.strlog.e_tsn = 0;
276 		sctp_clog.x.strlog.e_sseq = 0;
277 	}
278 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
279 	    SCTP_LOG_EVENT_STRM,
280 	    from,
281 	    sctp_clog.x.misc.log1,
282 	    sctp_clog.x.misc.log2,
283 	    sctp_clog.x.misc.log3,
284 	    sctp_clog.x.misc.log4);
285 }
286 
287 void
288 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
289 {
290 	struct sctp_cwnd_log sctp_clog;
291 
292 	sctp_clog.x.cwnd.net = net;
293 	if (stcb->asoc.send_queue_cnt > 255)
294 		sctp_clog.x.cwnd.cnt_in_send = 255;
295 	else
296 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
297 	if (stcb->asoc.stream_queue_cnt > 255)
298 		sctp_clog.x.cwnd.cnt_in_str = 255;
299 	else
300 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
301 
302 	if (net) {
303 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
304 		sctp_clog.x.cwnd.inflight = net->flight_size;
305 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
306 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
307 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
308 	}
309 	if (SCTP_CWNDLOG_PRESEND == from) {
310 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
311 	}
312 	sctp_clog.x.cwnd.cwnd_augment = augment;
313 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
314 	    SCTP_LOG_EVENT_CWND,
315 	    from,
316 	    sctp_clog.x.misc.log1,
317 	    sctp_clog.x.misc.log2,
318 	    sctp_clog.x.misc.log3,
319 	    sctp_clog.x.misc.log4);
320 }
321 
322 void
323 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
324 {
325 	struct sctp_cwnd_log sctp_clog;
326 
327 	memset(&sctp_clog, 0, sizeof(sctp_clog));
328 	if (inp) {
329 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
330 
331 	} else {
332 		sctp_clog.x.lock.sock = (void *)NULL;
333 	}
334 	sctp_clog.x.lock.inp = (void *)inp;
335 	if (stcb) {
336 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
337 	} else {
338 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
339 	}
340 	if (inp) {
341 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
342 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
343 	} else {
344 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
345 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
346 	}
347 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
348 	if (inp && (inp->sctp_socket)) {
349 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
350 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
351 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
352 	} else {
353 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
354 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
355 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
356 	}
357 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
358 	    SCTP_LOG_LOCK_EVENT,
359 	    from,
360 	    sctp_clog.x.misc.log1,
361 	    sctp_clog.x.misc.log2,
362 	    sctp_clog.x.misc.log3,
363 	    sctp_clog.x.misc.log4);
364 }
365 
366 void
367 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
368 {
369 	struct sctp_cwnd_log sctp_clog;
370 
371 	memset(&sctp_clog, 0, sizeof(sctp_clog));
372 	sctp_clog.x.cwnd.net = net;
373 	sctp_clog.x.cwnd.cwnd_new_value = error;
374 	sctp_clog.x.cwnd.inflight = net->flight_size;
375 	sctp_clog.x.cwnd.cwnd_augment = burst;
376 	if (stcb->asoc.send_queue_cnt > 255)
377 		sctp_clog.x.cwnd.cnt_in_send = 255;
378 	else
379 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
380 	if (stcb->asoc.stream_queue_cnt > 255)
381 		sctp_clog.x.cwnd.cnt_in_str = 255;
382 	else
383 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
384 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
385 	    SCTP_LOG_EVENT_MAXBURST,
386 	    from,
387 	    sctp_clog.x.misc.log1,
388 	    sctp_clog.x.misc.log2,
389 	    sctp_clog.x.misc.log3,
390 	    sctp_clog.x.misc.log4);
391 }
392 
393 void
394 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
395 {
396 	struct sctp_cwnd_log sctp_clog;
397 
398 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
399 	sctp_clog.x.rwnd.send_size = snd_size;
400 	sctp_clog.x.rwnd.overhead = overhead;
401 	sctp_clog.x.rwnd.new_rwnd = 0;
402 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
403 	    SCTP_LOG_EVENT_RWND,
404 	    from,
405 	    sctp_clog.x.misc.log1,
406 	    sctp_clog.x.misc.log2,
407 	    sctp_clog.x.misc.log3,
408 	    sctp_clog.x.misc.log4);
409 }
410 
411 void
412 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
413 {
414 	struct sctp_cwnd_log sctp_clog;
415 
416 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
417 	sctp_clog.x.rwnd.send_size = flight_size;
418 	sctp_clog.x.rwnd.overhead = overhead;
419 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
420 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
421 	    SCTP_LOG_EVENT_RWND,
422 	    from,
423 	    sctp_clog.x.misc.log1,
424 	    sctp_clog.x.misc.log2,
425 	    sctp_clog.x.misc.log3,
426 	    sctp_clog.x.misc.log4);
427 }
428 
429 #ifdef SCTP_MBCNT_LOGGING
430 static void
431 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
432 {
433 	struct sctp_cwnd_log sctp_clog;
434 
435 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
436 	sctp_clog.x.mbcnt.size_change = book;
437 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
438 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
439 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
440 	    SCTP_LOG_EVENT_MBCNT,
441 	    from,
442 	    sctp_clog.x.misc.log1,
443 	    sctp_clog.x.misc.log2,
444 	    sctp_clog.x.misc.log3,
445 	    sctp_clog.x.misc.log4);
446 }
447 
448 #endif
449 
450 void
451 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
452 {
453 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
454 	    SCTP_LOG_MISC_EVENT,
455 	    from,
456 	    a, b, c, d);
457 }
458 
459 void
460 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
461 {
462 	struct sctp_cwnd_log sctp_clog;
463 
464 	sctp_clog.x.wake.stcb = (void *)stcb;
465 	sctp_clog.x.wake.wake_cnt = wake_cnt;
466 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
467 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
468 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
469 
470 	if (stcb->asoc.stream_queue_cnt < 0xff)
471 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
472 	else
473 		sctp_clog.x.wake.stream_qcnt = 0xff;
474 
475 	if (stcb->asoc.chunks_on_out_queue < 0xff)
476 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
477 	else
478 		sctp_clog.x.wake.chunks_on_oque = 0xff;
479 
480 	sctp_clog.x.wake.sctpflags = 0;
481 	/* set in the deferred mode stuff */
482 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
483 		sctp_clog.x.wake.sctpflags |= 1;
484 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
485 		sctp_clog.x.wake.sctpflags |= 2;
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
487 		sctp_clog.x.wake.sctpflags |= 4;
488 	/* what about the sb */
489 	if (stcb->sctp_socket) {
490 		struct socket *so = stcb->sctp_socket;
491 
492 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
493 	} else {
494 		sctp_clog.x.wake.sbflags = 0xff;
495 	}
496 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
497 	    SCTP_LOG_EVENT_WAKE,
498 	    from,
499 	    sctp_clog.x.misc.log1,
500 	    sctp_clog.x.misc.log2,
501 	    sctp_clog.x.misc.log3,
502 	    sctp_clog.x.misc.log4);
503 }
504 
505 void
506 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
507 {
508 	struct sctp_cwnd_log sctp_clog;
509 
510 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
511 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
512 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
513 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
514 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
515 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
516 	sctp_clog.x.blk.sndlen = sendlen;
517 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
518 	    SCTP_LOG_EVENT_BLOCK,
519 	    from,
520 	    sctp_clog.x.misc.log1,
521 	    sctp_clog.x.misc.log2,
522 	    sctp_clog.x.misc.log3,
523 	    sctp_clog.x.misc.log4);
524 }
525 
526 int
527 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
528 {
529 	/* May need to fix this if ktrdump does not work */
530 	return (0);
531 }
532 
533 #ifdef SCTP_AUDITING_ENABLED
534 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
535 static int sctp_audit_indx = 0;
536 
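/*
 * sctp_audit_data[] is a small ring of (event, detail) byte pairs;
 * sctp_audit_indx is the next slot to fill and wraps at SCTP_AUDIT_SIZE.
 * sctp_auditing() cross-checks the retransmit and flight-size bookkeeping
 * against the sent queue, repairing (and reporting) any mismatch, while
 * sctp_print_audit_report() dumps the ring starting with the oldest entry.
 */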
537 static
538 void
539 sctp_print_audit_report(void)
540 {
541 	int i;
542 	int cnt;
543 
544 	cnt = 0;
545 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
546 		if ((sctp_audit_data[i][0] == 0xe0) &&
547 		    (sctp_audit_data[i][1] == 0x01)) {
548 			cnt = 0;
549 			SCTP_PRINTF("\n");
550 		} else if (sctp_audit_data[i][0] == 0xf0) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
554 		    (sctp_audit_data[i][1] == 0x01)) {
555 			SCTP_PRINTF("\n");
556 			cnt = 0;
557 		}
558 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
559 		    (uint32_t) sctp_audit_data[i][1]);
560 		cnt++;
561 		if ((cnt % 14) == 0)
562 			SCTP_PRINTF("\n");
563 	}
564 	for (i = 0; i < sctp_audit_indx; i++) {
565 		if ((sctp_audit_data[i][0] == 0xe0) &&
566 		    (sctp_audit_data[i][1] == 0x01)) {
567 			cnt = 0;
568 			SCTP_PRINTF("\n");
569 		} else if (sctp_audit_data[i][0] == 0xf0) {
570 			cnt = 0;
571 			SCTP_PRINTF("\n");
572 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
573 		    (sctp_audit_data[i][1] == 0x01)) {
574 			SCTP_PRINTF("\n");
575 			cnt = 0;
576 		}
577 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
578 		    (uint32_t) sctp_audit_data[i][1]);
579 		cnt++;
580 		if ((cnt % 14) == 0)
581 			SCTP_PRINTF("\n");
582 	}
583 	SCTP_PRINTF("\n");
584 }
585 
586 void
587 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
588     struct sctp_nets *net)
589 {
590 	int resend_cnt, tot_out, rep, tot_book_cnt;
591 	struct sctp_nets *lnet;
592 	struct sctp_tmit_chunk *chk;
593 
594 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
595 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
596 	sctp_audit_indx++;
597 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
598 		sctp_audit_indx = 0;
599 	}
600 	if (inp == NULL) {
601 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
602 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
603 		sctp_audit_indx++;
604 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
605 			sctp_audit_indx = 0;
606 		}
607 		return;
608 	}
609 	if (stcb == NULL) {
610 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
611 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
612 		sctp_audit_indx++;
613 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
614 			sctp_audit_indx = 0;
615 		}
616 		return;
617 	}
618 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
619 	sctp_audit_data[sctp_audit_indx][1] =
620 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
621 	sctp_audit_indx++;
622 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
623 		sctp_audit_indx = 0;
624 	}
625 	rep = 0;
626 	tot_book_cnt = 0;
627 	resend_cnt = tot_out = 0;
628 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
629 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
630 			resend_cnt++;
631 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
632 			tot_out += chk->book_size;
633 			tot_book_cnt++;
634 		}
635 	}
636 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
637 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
638 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
639 		sctp_audit_indx++;
640 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
641 			sctp_audit_indx = 0;
642 		}
643 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
644 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
645 		rep = 1;
646 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
647 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
648 		sctp_audit_data[sctp_audit_indx][1] =
649 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
650 		sctp_audit_indx++;
651 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
652 			sctp_audit_indx = 0;
653 		}
654 	}
655 	if (tot_out != stcb->asoc.total_flight) {
656 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
657 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
658 		sctp_audit_indx++;
659 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
660 			sctp_audit_indx = 0;
661 		}
662 		rep = 1;
663 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
664 		    (int)stcb->asoc.total_flight);
665 		stcb->asoc.total_flight = tot_out;
666 	}
667 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
668 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
669 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
670 		sctp_audit_indx++;
671 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
672 			sctp_audit_indx = 0;
673 		}
674 		rep = 1;
675 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
676 
677 		stcb->asoc.total_flight_count = tot_book_cnt;
678 	}
679 	tot_out = 0;
680 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
681 		tot_out += lnet->flight_size;
682 	}
683 	if (tot_out != stcb->asoc.total_flight) {
684 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
685 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
686 		sctp_audit_indx++;
687 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
688 			sctp_audit_indx = 0;
689 		}
690 		rep = 1;
691 		SCTP_PRINTF("real flight:%d net total was %d\n",
692 		    stcb->asoc.total_flight, tot_out);
693 		/* now corrective action */
694 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
695 
696 			tot_out = 0;
697 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
698 				if ((chk->whoTo == lnet) &&
699 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
700 					tot_out += chk->book_size;
701 				}
702 			}
703 			if (lnet->flight_size != tot_out) {
704 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
705 				    (void *)lnet, lnet->flight_size,
706 				    tot_out);
707 				lnet->flight_size = tot_out;
708 			}
709 		}
710 	}
711 	if (rep) {
712 		sctp_print_audit_report();
713 	}
714 }
715 
716 void
717 sctp_audit_log(uint8_t ev, uint8_t fd)
718 {
719 
720 	sctp_audit_data[sctp_audit_indx][0] = ev;
721 	sctp_audit_data[sctp_audit_indx][1] = fd;
722 	sctp_audit_indx++;
723 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
724 		sctp_audit_indx = 0;
725 	}
726 }
727 
728 #endif
729 
730 /*
731  * sctp_stop_timers_for_shutdown() should be called
732  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
733  * state to make sure that all timers are stopped.
734  */
735 void
736 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
737 {
738 	struct sctp_association *asoc;
739 	struct sctp_nets *net;
740 
741 	asoc = &stcb->asoc;
742 
743 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
744 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
745 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
746 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
747 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
748 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
749 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
750 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
751 	}
752 }
753 
754 /*
755  * A list of sizes based on typical MTUs; used only if the next-hop
756  * MTU is not returned.
757  */
758 static uint32_t sctp_mtu_sizes[] = {
759 	68,
760 	296,
761 	508,
762 	512,
763 	544,
764 	576,
765 	1006,
766 	1492,
767 	1500,
768 	1536,
769 	2002,
770 	2048,
771 	4352,
772 	4464,
773 	8166,
774 	17914,
775 	32000,
776 	65535
777 };
778 
779 /*
780  * Return the largest MTU smaller than val. If there is no
781  * entry, just return val.
782  */
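/*
 * For example, with the table above, sctp_get_prev_mtu(1400) returns 1006,
 * while sctp_get_prev_mtu(68) returns 68 since no smaller entry exists.
 */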
783 uint32_t
784 sctp_get_prev_mtu(uint32_t val)
785 {
786 	uint32_t i;
787 
788 	if (val <= sctp_mtu_sizes[0]) {
789 		return (val);
790 	}
791 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
792 		if (val <= sctp_mtu_sizes[i]) {
793 			break;
794 		}
795 	}
796 	return (sctp_mtu_sizes[i - 1]);
797 }
798 
799 /*
800  * Return the smallest MTU larger than val. If there is no
801  * entry, just return val.
802  */
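/*
 * For example, sctp_get_next_mtu(1400) returns 1492, while values of
 * 65535 or more are returned unchanged.
 */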
803 uint32_t
804 sctp_get_next_mtu(uint32_t val)
805 {
806 	/* select another MTU that is just bigger than this one */
807 	uint32_t i;
808 
809 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
810 		if (val < sctp_mtu_sizes[i]) {
811 			return (sctp_mtu_sizes[i]);
812 		}
813 	}
814 	return (val);
815 }
816 
817 void
818 sctp_fill_random_store(struct sctp_pcb *m)
819 {
820 	/*
821 	 * Here we use the MD5/SHA-1 HMAC over our good random numbers and
822 	 * our counter. The result becomes our new set of good random numbers
823 	 * and we set up to hand these out. Note that we do no locking to
824 	 * protect this. This is OK: if competing callers get here at the
825 	 * same time we just get more gobbledygook in the random store, which
826 	 * is what we want. There is a danger that two callers will use the
827 	 * same random numbers, but that's OK too since that is random as well :->
828 	 */
829 	m->store_at = 0;
830 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
831 	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
832 	    sizeof(m->random_counter), (uint8_t *) m->random_store);
833 	m->random_counter++;
834 }
835 
836 uint32_t
837 sctp_select_initial_TSN(struct sctp_pcb *inp)
838 {
839 	/*
840 	 * A true implementation should use a random selection process to get
841 	 * the initial stream sequence number, using RFC 1750 as a good
842 	 * guideline.
843 	 */
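	/*
	 * In practice we hand out the next 32-bit word of the endpoint's
	 * random_store, reserving the slot with an atomic compare-and-set
	 * and refilling the store (sctp_fill_random_store) whenever the
	 * read position wraps back to zero. If initial_sequence_debug is
	 * set, sequential values are returned instead for debugging.
	 */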
844 	uint32_t x, *xp;
845 	uint8_t *p;
846 	int store_at, new_store;
847 
848 	if (inp->initial_sequence_debug != 0) {
849 		uint32_t ret;
850 
851 		ret = inp->initial_sequence_debug;
852 		inp->initial_sequence_debug++;
853 		return (ret);
854 	}
855 retry:
856 	store_at = inp->store_at;
857 	new_store = store_at + sizeof(uint32_t);
858 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
859 		new_store = 0;
860 	}
861 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
862 		goto retry;
863 	}
864 	if (new_store == 0) {
865 		/* Refill the random store */
866 		sctp_fill_random_store(inp);
867 	}
868 	p = &inp->random_store[store_at];
869 	xp = (uint32_t *) p;
870 	x = *xp;
871 	return (x);
872 }
873 
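/*
 * Select a verification tag: draw 32-bit values from the random store,
 * skipping zero; when "check" is set, keep drawing until
 * sctp_is_vtag_good() reports the tag as usable for this lport/rport pair.
 */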
874 uint32_t
875 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
876 {
877 	uint32_t x;
878 	struct timeval now;
879 
880 	if (check) {
881 		(void)SCTP_GETTIME_TIMEVAL(&now);
882 	}
883 	for (;;) {
884 		x = sctp_select_initial_TSN(&inp->sctp_ep);
885 		if (x == 0) {
886 			/* we never use 0 */
887 			continue;
888 		}
889 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
890 			break;
891 		}
892 	}
893 	return (x);
894 }
895 
896 int
897 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
898     uint32_t override_tag, uint32_t vrf_id)
899 {
900 	struct sctp_association *asoc;
901 
902 	/*
903 	 * Anything set to zero is taken care of by the allocation routine's
904 	 * bzero
905 	 */
906 
907 	/*
908 	 * Up front, select what scoping to apply on addresses I tell my peer.
909 	 * Not sure what to do with these right now; we will need to come up
910 	 * with a way to set them. We may need to pass them through from the
911 	 * caller in the sctp_aloc_assoc() function.
912 	 */
913 	int i;
914 
915 #if defined(SCTP_DETAILED_STR_STATS)
916 	int j;
917 
918 #endif
919 
920 	asoc = &stcb->asoc;
921 	/* init all variables to a known value. */
922 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
923 	asoc->max_burst = inp->sctp_ep.max_burst;
924 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
925 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
926 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
927 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
928 	asoc->ecn_supported = inp->ecn_supported;
929 	asoc->prsctp_supported = inp->prsctp_supported;
930 	asoc->auth_supported = inp->auth_supported;
931 	asoc->asconf_supported = inp->asconf_supported;
932 	asoc->reconfig_supported = inp->reconfig_supported;
933 	asoc->nrsack_supported = inp->nrsack_supported;
934 	asoc->pktdrop_supported = inp->pktdrop_supported;
935 	asoc->sctp_cmt_pf = (uint8_t) 0;
936 	asoc->sctp_frag_point = inp->sctp_frag_point;
937 	asoc->sctp_features = inp->sctp_features;
938 	asoc->default_dscp = inp->sctp_ep.default_dscp;
939 	asoc->max_cwnd = inp->max_cwnd;
940 #ifdef INET6
941 	if (inp->sctp_ep.default_flowlabel) {
942 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
943 	} else {
944 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
945 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
946 			asoc->default_flowlabel &= 0x000fffff;
947 			asoc->default_flowlabel |= 0x80000000;
948 		} else {
949 			asoc->default_flowlabel = 0;
950 		}
951 	}
952 #endif
953 	asoc->sb_send_resv = 0;
954 	if (override_tag) {
955 		asoc->my_vtag = override_tag;
956 	} else {
957 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
958 	}
959 	/* Get the nonce tags */
960 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
961 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
962 	asoc->vrf_id = vrf_id;
963 
964 #ifdef SCTP_ASOCLOG_OF_TSNS
965 	asoc->tsn_in_at = 0;
966 	asoc->tsn_out_at = 0;
967 	asoc->tsn_in_wrapped = 0;
968 	asoc->tsn_out_wrapped = 0;
969 	asoc->cumack_log_at = 0;
970 	asoc->cumack_log_atsnt = 0;
971 #endif
972 #ifdef SCTP_FS_SPEC_LOG
973 	asoc->fs_index = 0;
974 #endif
975 	asoc->refcnt = 0;
976 	asoc->assoc_up_sent = 0;
977 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
978 	    sctp_select_initial_TSN(&inp->sctp_ep);
979 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
980 	/* we are optimistic here */
981 	asoc->peer_supports_nat = 0;
982 	asoc->sent_queue_retran_cnt = 0;
983 
984 	/* for CMT */
985 	asoc->last_net_cmt_send_started = NULL;
986 
987 	/* This will need to be adjusted */
988 	asoc->last_acked_seq = asoc->init_seq_number - 1;
989 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
990 	asoc->asconf_seq_in = asoc->last_acked_seq;
991 
992 	/* here we are different, we hold the next one we expect */
993 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
994 
995 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
996 	asoc->initial_rto = inp->sctp_ep.initial_rto;
997 
998 	asoc->max_init_times = inp->sctp_ep.max_init_times;
999 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1000 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1001 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1002 	asoc->free_chunk_cnt = 0;
1003 
1004 	asoc->iam_blocking = 0;
1005 	asoc->context = inp->sctp_context;
1006 	asoc->local_strreset_support = inp->local_strreset_support;
1007 	asoc->def_send = inp->def_send;
1008 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1009 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1010 	asoc->pr_sctp_cnt = 0;
1011 	asoc->total_output_queue_size = 0;
1012 
1013 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1014 		asoc->scope.ipv6_addr_legal = 1;
1015 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1016 			asoc->scope.ipv4_addr_legal = 1;
1017 		} else {
1018 			asoc->scope.ipv4_addr_legal = 0;
1019 		}
1020 	} else {
1021 		asoc->scope.ipv6_addr_legal = 0;
1022 		asoc->scope.ipv4_addr_legal = 1;
1023 	}
1024 
1025 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1026 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1027 
1028 	asoc->smallest_mtu = inp->sctp_frag_point;
1029 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1030 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1031 
1032 	asoc->locked_on_sending = NULL;
1033 	asoc->stream_locked_on = 0;
1034 	asoc->ecn_echo_cnt_onq = 0;
1035 	asoc->stream_locked = 0;
1036 
1037 	asoc->send_sack = 1;
1038 
1039 	LIST_INIT(&asoc->sctp_restricted_addrs);
1040 
1041 	TAILQ_INIT(&asoc->nets);
1042 	TAILQ_INIT(&asoc->pending_reply_queue);
1043 	TAILQ_INIT(&asoc->asconf_ack_sent);
1044 	/* Set up to fill the hb random cache at the first HB */
1045 	asoc->hb_random_idx = 4;
1046 
1047 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1048 
1049 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1050 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1051 
1052 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1053 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1054 
1055 	/*
1056 	 * Now the stream parameters; here we allocate space for all streams
1057 	 * that we request by default.
1058 	 */
1059 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1060 	    inp->sctp_ep.pre_open_stream_count;
1061 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1062 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1063 	    SCTP_M_STRMO);
1064 	if (asoc->strmout == NULL) {
1065 		/* big trouble no memory */
1066 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1067 		return (ENOMEM);
1068 	}
1069 	for (i = 0; i < asoc->streamoutcnt; i++) {
1070 		/*
1071 		 * The inbound side must be set to 0xffff. Also NOTE: when we get
1072 		 * the INIT-ACK back (for the INIT sender) we MUST reduce the
1073 		 * count (streamoutcnt), but first check whether we sent on any of
1074 		 * the upper streams that were dropped (if some were). Those
1075 		 * that were dropped must be reported to the upper layer as
1076 		 * failed to send.
1077 		 */
1078 		asoc->strmout[i].next_sequence_send = 0x0;
1079 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1080 		asoc->strmout[i].chunks_on_queues = 0;
1081 #if defined(SCTP_DETAILED_STR_STATS)
1082 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1083 			asoc->strmout[i].abandoned_sent[j] = 0;
1084 			asoc->strmout[i].abandoned_unsent[j] = 0;
1085 		}
1086 #else
1087 		asoc->strmout[i].abandoned_sent[0] = 0;
1088 		asoc->strmout[i].abandoned_unsent[0] = 0;
1089 #endif
1090 		asoc->strmout[i].stream_no = i;
1091 		asoc->strmout[i].last_msg_incomplete = 0;
1092 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1093 	}
1094 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1095 
1096 	/* Now the mapping array */
1097 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1098 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1099 	    SCTP_M_MAP);
1100 	if (asoc->mapping_array == NULL) {
1101 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1102 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1103 		return (ENOMEM);
1104 	}
1105 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1106 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1107 	    SCTP_M_MAP);
1108 	if (asoc->nr_mapping_array == NULL) {
1109 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1110 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1111 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1112 		return (ENOMEM);
1113 	}
1114 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1115 
1116 	/* Now the init of the other outqueues */
1117 	TAILQ_INIT(&asoc->free_chunks);
1118 	TAILQ_INIT(&asoc->control_send_queue);
1119 	TAILQ_INIT(&asoc->asconf_send_queue);
1120 	TAILQ_INIT(&asoc->send_queue);
1121 	TAILQ_INIT(&asoc->sent_queue);
1122 	TAILQ_INIT(&asoc->reasmqueue);
1123 	TAILQ_INIT(&asoc->resetHead);
1124 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1125 	TAILQ_INIT(&asoc->asconf_queue);
1126 	/* authentication fields */
1127 	asoc->authinfo.random = NULL;
1128 	asoc->authinfo.active_keyid = 0;
1129 	asoc->authinfo.assoc_key = NULL;
1130 	asoc->authinfo.assoc_keyid = 0;
1131 	asoc->authinfo.recv_key = NULL;
1132 	asoc->authinfo.recv_keyid = 0;
1133 	LIST_INIT(&asoc->shared_keys);
1134 	asoc->marked_retrans = 0;
1135 	asoc->port = inp->sctp_ep.port;
1136 	asoc->timoinit = 0;
1137 	asoc->timodata = 0;
1138 	asoc->timosack = 0;
1139 	asoc->timoshutdown = 0;
1140 	asoc->timoheartbeat = 0;
1141 	asoc->timocookie = 0;
1142 	asoc->timoshutdownack = 0;
1143 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1144 	asoc->discontinuity_time = asoc->start_time;
1145 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1146 		asoc->abandoned_unsent[i] = 0;
1147 		asoc->abandoned_sent[i] = 0;
1148 	}
1149 	/*
1150 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1151 	 * freed later when the association is freed.
1152 	 */
1153 	return (0);
1154 }
1155 
1156 void
1157 sctp_print_mapping_array(struct sctp_association *asoc)
1158 {
1159 	unsigned int i, limit;
1160 
1161 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1162 	    asoc->mapping_array_size,
1163 	    asoc->mapping_array_base_tsn,
1164 	    asoc->cumulative_tsn,
1165 	    asoc->highest_tsn_inside_map,
1166 	    asoc->highest_tsn_inside_nr_map);
1167 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1168 		if (asoc->mapping_array[limit - 1] != 0) {
1169 			break;
1170 		}
1171 	}
1172 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1173 	for (i = 0; i < limit; i++) {
1174 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1175 	}
1176 	if (limit % 16)
1177 		SCTP_PRINTF("\n");
1178 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1179 		if (asoc->nr_mapping_array[limit - 1]) {
1180 			break;
1181 		}
1182 	}
1183 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1184 	for (i = 0; i < limit; i++) {
1185 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1186 	}
1187 	if (limit % 16)
1188 		SCTP_PRINTF("\n");
1189 }
1190 
1191 int
1192 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1193 {
1194 	/* mapping array needs to grow */
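	/*
	 * "needed" counts additional TSNs to cover; each map byte tracks
	 * 8 TSNs, so grow by roughly needed / 8 bytes plus
	 * SCTP_MAPPING_ARRAY_INCR of slack. The renegable and non-renegable
	 * maps are reallocated together so they stay the same size.
	 */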
1195 	uint8_t *new_array1, *new_array2;
1196 	uint32_t new_size;
1197 
1198 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1199 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1200 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1201 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1202 		/* can't get more, forget it */
1203 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1204 		if (new_array1) {
1205 			SCTP_FREE(new_array1, SCTP_M_MAP);
1206 		}
1207 		if (new_array2) {
1208 			SCTP_FREE(new_array2, SCTP_M_MAP);
1209 		}
1210 		return (-1);
1211 	}
1212 	memset(new_array1, 0, new_size);
1213 	memset(new_array2, 0, new_size);
1214 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1215 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1216 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1217 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1218 	asoc->mapping_array = new_array1;
1219 	asoc->nr_mapping_array = new_array2;
1220 	asoc->mapping_array_size = new_size;
1221 	return (0);
1222 }
1223 
1224 
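/*
 * Core of the iterator mechanism: walk a single endpoint or the whole
 * endpoint list, skip endpoints whose flags/features do not match, and run
 * the per-endpoint and per-association callbacks on every association in
 * the requested state. Every SCTP_ITERATOR_MAX_AT_ONCE associations the
 * INP-INFO and iterator locks are dropped and re-taken so other threads
 * can make progress; function_atend() is called once the walk completes
 * and the iterator itself is then freed.
 */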
1225 static void
1226 sctp_iterator_work(struct sctp_iterator *it)
1227 {
1228 	int iteration_count = 0;
1229 	int inp_skip = 0;
1230 	int first_in = 1;
1231 	struct sctp_inpcb *tinp;
1232 
1233 	SCTP_INP_INFO_RLOCK();
1234 	SCTP_ITERATOR_LOCK();
1235 	if (it->inp) {
1236 		SCTP_INP_RLOCK(it->inp);
1237 		SCTP_INP_DECR_REF(it->inp);
1238 	}
1239 	if (it->inp == NULL) {
1240 		/* iterator is complete */
1241 done_with_iterator:
1242 		SCTP_ITERATOR_UNLOCK();
1243 		SCTP_INP_INFO_RUNLOCK();
1244 		if (it->function_atend != NULL) {
1245 			(*it->function_atend) (it->pointer, it->val);
1246 		}
1247 		SCTP_FREE(it, SCTP_M_ITER);
1248 		return;
1249 	}
1250 select_a_new_ep:
1251 	if (first_in) {
1252 		first_in = 0;
1253 	} else {
1254 		SCTP_INP_RLOCK(it->inp);
1255 	}
1256 	while (((it->pcb_flags) &&
1257 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1258 	    ((it->pcb_features) &&
1259 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1260 		/* endpoint flags or features don't match, so keep looking */
1261 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1262 			SCTP_INP_RUNLOCK(it->inp);
1263 			goto done_with_iterator;
1264 		}
1265 		tinp = it->inp;
1266 		it->inp = LIST_NEXT(it->inp, sctp_list);
1267 		SCTP_INP_RUNLOCK(tinp);
1268 		if (it->inp == NULL) {
1269 			goto done_with_iterator;
1270 		}
1271 		SCTP_INP_RLOCK(it->inp);
1272 	}
1273 	/* now go through each assoc which is in the desired state */
1274 	if (it->done_current_ep == 0) {
1275 		if (it->function_inp != NULL)
1276 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1277 		it->done_current_ep = 1;
1278 	}
1279 	if (it->stcb == NULL) {
1280 		/* run the per instance function */
1281 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1282 	}
1283 	if ((inp_skip) || it->stcb == NULL) {
1284 		if (it->function_inp_end != NULL) {
1285 			inp_skip = (*it->function_inp_end) (it->inp,
1286 			    it->pointer,
1287 			    it->val);
1288 		}
1289 		SCTP_INP_RUNLOCK(it->inp);
1290 		goto no_stcb;
1291 	}
1292 	while (it->stcb) {
1293 		SCTP_TCB_LOCK(it->stcb);
1294 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1295 			/* not in the right state... keep looking */
1296 			SCTP_TCB_UNLOCK(it->stcb);
1297 			goto next_assoc;
1298 		}
1299 		/* see if we have hit the iterator's per-pass limit */
1300 		iteration_count++;
1301 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1302 			/* Pause to let others grab the lock */
1303 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1304 			SCTP_TCB_UNLOCK(it->stcb);
1305 			SCTP_INP_INCR_REF(it->inp);
1306 			SCTP_INP_RUNLOCK(it->inp);
1307 			SCTP_ITERATOR_UNLOCK();
1308 			SCTP_INP_INFO_RUNLOCK();
1309 			SCTP_INP_INFO_RLOCK();
1310 			SCTP_ITERATOR_LOCK();
1311 			if (sctp_it_ctl.iterator_flags) {
1312 				/* We won't be staying here */
1313 				SCTP_INP_DECR_REF(it->inp);
1314 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1315 				if (sctp_it_ctl.iterator_flags &
1316 				    SCTP_ITERATOR_STOP_CUR_IT) {
1317 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1318 					goto done_with_iterator;
1319 				}
1320 				if (sctp_it_ctl.iterator_flags &
1321 				    SCTP_ITERATOR_STOP_CUR_INP) {
1322 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1323 					goto no_stcb;
1324 				}
1325 				/* If we reach here huh? */
1326 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1327 				    sctp_it_ctl.iterator_flags);
1328 				sctp_it_ctl.iterator_flags = 0;
1329 			}
1330 			SCTP_INP_RLOCK(it->inp);
1331 			SCTP_INP_DECR_REF(it->inp);
1332 			SCTP_TCB_LOCK(it->stcb);
1333 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1334 			iteration_count = 0;
1335 		}
1336 		/* run function on this one */
1337 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1338 
1339 		/*
1340 		 * we lie here; it really needs to have its own type, but
1341 		 * first I must verify that this won't affect things :-0
1342 		 */
1343 		if (it->no_chunk_output == 0)
1344 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1345 
1346 		SCTP_TCB_UNLOCK(it->stcb);
1347 next_assoc:
1348 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1349 		if (it->stcb == NULL) {
1350 			/* Run last function */
1351 			if (it->function_inp_end != NULL) {
1352 				inp_skip = (*it->function_inp_end) (it->inp,
1353 				    it->pointer,
1354 				    it->val);
1355 			}
1356 		}
1357 	}
1358 	SCTP_INP_RUNLOCK(it->inp);
1359 no_stcb:
1360 	/* done with all assocs on this endpoint, move on to next endpoint */
1361 	it->done_current_ep = 0;
1362 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1363 		it->inp = NULL;
1364 	} else {
1365 		it->inp = LIST_NEXT(it->inp, sctp_list);
1366 	}
1367 	if (it->inp == NULL) {
1368 		goto done_with_iterator;
1369 	}
1370 	goto select_a_new_ep;
1371 }
1372 
1373 void
1374 sctp_iterator_worker(void)
1375 {
1376 	struct sctp_iterator *it, *nit;
1377 
1378 	/* This function is called with the WQ lock in place */
1379 
1380 	sctp_it_ctl.iterator_running = 1;
1381 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1382 		sctp_it_ctl.cur_it = it;
1383 		/* now lets work on this one */
1384 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1385 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1386 		CURVNET_SET(it->vn);
1387 		sctp_iterator_work(it);
1388 		sctp_it_ctl.cur_it = NULL;
1389 		CURVNET_RESTORE();
1390 		SCTP_IPI_ITERATOR_WQ_LOCK();
1391 		/* sa_ignore FREED_MEMORY */
1392 	}
1393 	sctp_it_ctl.iterator_running = 0;
1394 	return;
1395 }
1396 
1397 
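/*
 * Drain the address work queue fed by the rtsock calls: move all queued
 * sctp_laddr entries onto a freshly allocated sctp_asconf_iterator and
 * start an iterator over all bound-all endpoints to apply the address
 * changes. If no memory is available, the ADDR_WQ timer is restarted so
 * the work is retried later.
 */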
1398 static void
1399 sctp_handle_addr_wq(void)
1400 {
1401 	/* deal with the ADDR wq from the rtsock calls */
1402 	struct sctp_laddr *wi, *nwi;
1403 	struct sctp_asconf_iterator *asc;
1404 
1405 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1406 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1407 	if (asc == NULL) {
1408 		/* Try later, no memory */
1409 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1410 		    (struct sctp_inpcb *)NULL,
1411 		    (struct sctp_tcb *)NULL,
1412 		    (struct sctp_nets *)NULL);
1413 		return;
1414 	}
1415 	LIST_INIT(&asc->list_of_work);
1416 	asc->cnt = 0;
1417 
1418 	SCTP_WQ_ADDR_LOCK();
1419 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1420 		LIST_REMOVE(wi, sctp_nxt_addr);
1421 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1422 		asc->cnt++;
1423 	}
1424 	SCTP_WQ_ADDR_UNLOCK();
1425 
1426 	if (asc->cnt == 0) {
1427 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1428 	} else {
1429 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1430 		    sctp_asconf_iterator_stcb,
1431 		    NULL,	/* No ep end for boundall */
1432 		    SCTP_PCB_FLAGS_BOUNDALL,
1433 		    SCTP_PCB_ANY_FEATURES,
1434 		    SCTP_ASOC_ANY_STATE,
1435 		    (void *)asc, 0,
1436 		    sctp_asconf_iterator_end, NULL, 0);
1437 	}
1438 }
1439 
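/*
 * Common callout handler for every SCTP timer type. It validates the timer
 * (stale pointer, invalid type, missing inp/stcb, inactive callout), takes
 * references and the TCB lock as needed, and then dispatches on tmr->type.
 * Handlers that may have freed the TCB return nonzero, in which case we
 * jump to out_decr without touching the TCB again.
 */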
1440 void
1441 sctp_timeout_handler(void *t)
1442 {
1443 	struct sctp_inpcb *inp;
1444 	struct sctp_tcb *stcb;
1445 	struct sctp_nets *net;
1446 	struct sctp_timer *tmr;
1447 
1448 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1449 	struct socket *so;
1450 
1451 #endif
1452 	int did_output, type;
1453 
1454 	tmr = (struct sctp_timer *)t;
1455 	inp = (struct sctp_inpcb *)tmr->ep;
1456 	stcb = (struct sctp_tcb *)tmr->tcb;
1457 	net = (struct sctp_nets *)tmr->net;
1458 	CURVNET_SET((struct vnet *)tmr->vnet);
1459 	did_output = 1;
1460 
1461 #ifdef SCTP_AUDITING_ENABLED
1462 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1463 	sctp_auditing(3, inp, stcb, net);
1464 #endif
1465 
1466 	/* sanity checks... */
1467 	if (tmr->self != (void *)tmr) {
1468 		/*
1469 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1470 		 * (void *)tmr);
1471 		 */
1472 		CURVNET_RESTORE();
1473 		return;
1474 	}
1475 	tmr->stopped_from = 0xa001;
1476 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1477 		/*
1478 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1479 		 * tmr->type);
1480 		 */
1481 		CURVNET_RESTORE();
1482 		return;
1483 	}
1484 	tmr->stopped_from = 0xa002;
1485 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1486 		CURVNET_RESTORE();
1487 		return;
1488 	}
1489 	/* if this is an iterator timeout, get the struct and clear inp */
1490 	tmr->stopped_from = 0xa003;
1491 	type = tmr->type;
1492 	if (inp) {
1493 		SCTP_INP_INCR_REF(inp);
1494 		if ((inp->sctp_socket == NULL) &&
1495 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1496 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1497 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1498 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1499 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1500 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1501 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1502 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1503 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1504 		    ) {
1505 			SCTP_INP_DECR_REF(inp);
1506 			CURVNET_RESTORE();
1507 			return;
1508 		}
1509 	}
1510 	tmr->stopped_from = 0xa004;
1511 	if (stcb) {
1512 		atomic_add_int(&stcb->asoc.refcnt, 1);
1513 		if (stcb->asoc.state == 0) {
1514 			atomic_add_int(&stcb->asoc.refcnt, -1);
1515 			if (inp) {
1516 				SCTP_INP_DECR_REF(inp);
1517 			}
1518 			CURVNET_RESTORE();
1519 			return;
1520 		}
1521 	}
1522 	tmr->stopped_from = 0xa005;
1523 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
1524 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1525 		if (inp) {
1526 			SCTP_INP_DECR_REF(inp);
1527 		}
1528 		if (stcb) {
1529 			atomic_add_int(&stcb->asoc.refcnt, -1);
1530 		}
1531 		CURVNET_RESTORE();
1532 		return;
1533 	}
1534 	tmr->stopped_from = 0xa006;
1535 
1536 	if (stcb) {
1537 		SCTP_TCB_LOCK(stcb);
1538 		atomic_add_int(&stcb->asoc.refcnt, -1);
1539 		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
1540 		    ((stcb->asoc.state == 0) ||
1541 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1542 			SCTP_TCB_UNLOCK(stcb);
1543 			if (inp) {
1544 				SCTP_INP_DECR_REF(inp);
1545 			}
1546 			CURVNET_RESTORE();
1547 			return;
1548 		}
1549 	}
1550 	/* record in stopped_from which timeout occurred */
1551 	tmr->stopped_from = tmr->type;
1552 
1553 	/* mark as being serviced now */
1554 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1555 		/*
1556 		 * Callout has been rescheduled.
1557 		 */
1558 		goto get_out;
1559 	}
1560 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1561 		/*
1562 		 * Not active, so no action.
1563 		 */
1564 		goto get_out;
1565 	}
1566 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1567 
1568 	/* call the handler for the appropriate timer type */
1569 	switch (tmr->type) {
1570 	case SCTP_TIMER_TYPE_ZERO_COPY:
1571 		if (inp == NULL) {
1572 			break;
1573 		}
1574 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1575 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1576 		}
1577 		break;
1578 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1579 		if (inp == NULL) {
1580 			break;
1581 		}
1582 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1583 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1584 		}
1585 		break;
1586 	case SCTP_TIMER_TYPE_ADDR_WQ:
1587 		sctp_handle_addr_wq();
1588 		break;
1589 	case SCTP_TIMER_TYPE_SEND:
1590 		if ((stcb == NULL) || (inp == NULL)) {
1591 			break;
1592 		}
1593 		SCTP_STAT_INCR(sctps_timodata);
1594 		stcb->asoc.timodata++;
1595 		stcb->asoc.num_send_timers_up--;
1596 		if (stcb->asoc.num_send_timers_up < 0) {
1597 			stcb->asoc.num_send_timers_up = 0;
1598 		}
1599 		SCTP_TCB_LOCK_ASSERT(stcb);
1600 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1601 			/* no need to unlock on tcb; it's gone */
1602 
1603 			goto out_decr;
1604 		}
1605 		SCTP_TCB_LOCK_ASSERT(stcb);
1606 #ifdef SCTP_AUDITING_ENABLED
1607 		sctp_auditing(4, inp, stcb, net);
1608 #endif
1609 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1610 		if ((stcb->asoc.num_send_timers_up == 0) &&
1611 		    (stcb->asoc.sent_queue_cnt > 0)) {
1612 			struct sctp_tmit_chunk *chk;
1613 
1614 			/*
1615 			 * Safeguard: if there are chunks on the sent queue
1616 			 * somewhere but no timers running, something is
1617 			 * wrong... so we start a timer on the first chunk
1618 			 * on the sent queue, on whatever net it is sent to.
1619 			 */
1620 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1621 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1622 			    chk->whoTo);
1623 		}
1624 		break;
1625 	case SCTP_TIMER_TYPE_INIT:
1626 		if ((stcb == NULL) || (inp == NULL)) {
1627 			break;
1628 		}
1629 		SCTP_STAT_INCR(sctps_timoinit);
1630 		stcb->asoc.timoinit++;
1631 		if (sctp_t1init_timer(inp, stcb, net)) {
1632 			/* no need to unlock on tcb; it's gone */
1633 			goto out_decr;
1634 		}
1635 		/* We do output but not here */
1636 		did_output = 0;
1637 		break;
1638 	case SCTP_TIMER_TYPE_RECV:
1639 		if ((stcb == NULL) || (inp == NULL)) {
1640 			break;
1641 		}
1642 		SCTP_STAT_INCR(sctps_timosack);
1643 		stcb->asoc.timosack++;
1644 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1645 #ifdef SCTP_AUDITING_ENABLED
1646 		sctp_auditing(4, inp, stcb, net);
1647 #endif
1648 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1649 		break;
1650 	case SCTP_TIMER_TYPE_SHUTDOWN:
1651 		if ((stcb == NULL) || (inp == NULL)) {
1652 			break;
1653 		}
1654 		if (sctp_shutdown_timer(inp, stcb, net)) {
1655 			/* no need to unlock on tcb; it's gone */
1656 			goto out_decr;
1657 		}
1658 		SCTP_STAT_INCR(sctps_timoshutdown);
1659 		stcb->asoc.timoshutdown++;
1660 #ifdef SCTP_AUDITING_ENABLED
1661 		sctp_auditing(4, inp, stcb, net);
1662 #endif
1663 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1664 		break;
1665 	case SCTP_TIMER_TYPE_HEARTBEAT:
1666 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1667 			break;
1668 		}
1669 		SCTP_STAT_INCR(sctps_timoheartbeat);
1670 		stcb->asoc.timoheartbeat++;
1671 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1672 			/* no need to unlock on tcb; it's gone */
1673 			goto out_decr;
1674 		}
1675 #ifdef SCTP_AUDITING_ENABLED
1676 		sctp_auditing(4, inp, stcb, net);
1677 #endif
1678 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1679 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1680 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1681 		}
1682 		break;
1683 	case SCTP_TIMER_TYPE_COOKIE:
1684 		if ((stcb == NULL) || (inp == NULL)) {
1685 			break;
1686 		}
1687 		if (sctp_cookie_timer(inp, stcb, net)) {
1688 			/* no need to unlock on tcb; it's gone */
1689 			goto out_decr;
1690 		}
1691 		SCTP_STAT_INCR(sctps_timocookie);
1692 		stcb->asoc.timocookie++;
1693 #ifdef SCTP_AUDITING_ENABLED
1694 		sctp_auditing(4, inp, stcb, net);
1695 #endif
1696 		/*
1697 		 * We consider the T3 and Cookie timers pretty much the same with
1698 		 * respect to the "from" value passed to chunk_output.
1699 		 */
1700 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1701 		break;
1702 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1703 		{
1704 			struct timeval tv;
1705 			int i, secret;
1706 
1707 			if (inp == NULL) {
1708 				break;
1709 			}
1710 			SCTP_STAT_INCR(sctps_timosecret);
1711 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1712 			SCTP_INP_WLOCK(inp);
1713 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1714 			inp->sctp_ep.last_secret_number =
1715 			    inp->sctp_ep.current_secret_number;
1716 			inp->sctp_ep.current_secret_number++;
1717 			if (inp->sctp_ep.current_secret_number >=
1718 			    SCTP_HOW_MANY_SECRETS) {
1719 				inp->sctp_ep.current_secret_number = 0;
1720 			}
1721 			secret = (int)inp->sctp_ep.current_secret_number;
1722 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1723 				inp->sctp_ep.secret_key[secret][i] =
1724 				    sctp_select_initial_TSN(&inp->sctp_ep);
1725 			}
1726 			SCTP_INP_WUNLOCK(inp);
1727 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1728 		}
1729 		did_output = 0;
1730 		break;
1731 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1732 		if ((stcb == NULL) || (inp == NULL)) {
1733 			break;
1734 		}
1735 		SCTP_STAT_INCR(sctps_timopathmtu);
1736 		sctp_pathmtu_timer(inp, stcb, net);
1737 		did_output = 0;
1738 		break;
1739 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1740 		if ((stcb == NULL) || (inp == NULL)) {
1741 			break;
1742 		}
1743 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1744 			/* no need to unlock the tcb, it's gone */
1745 			goto out_decr;
1746 		}
1747 		SCTP_STAT_INCR(sctps_timoshutdownack);
1748 		stcb->asoc.timoshutdownack++;
1749 #ifdef SCTP_AUDITING_ENABLED
1750 		sctp_auditing(4, inp, stcb, net);
1751 #endif
1752 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1753 		break;
1754 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1755 		if ((stcb == NULL) || (inp == NULL)) {
1756 			break;
1757 		}
1758 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1759 		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
1760 		/* no need to unlock the tcb, it's gone */
1761 		goto out_decr;
1762 
1763 	case SCTP_TIMER_TYPE_STRRESET:
1764 		if ((stcb == NULL) || (inp == NULL)) {
1765 			break;
1766 		}
1767 		if (sctp_strreset_timer(inp, stcb, net)) {
1768 			/* no need to unlock the tcb, it's gone */
1769 			goto out_decr;
1770 		}
1771 		SCTP_STAT_INCR(sctps_timostrmrst);
1772 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1773 		break;
1774 	case SCTP_TIMER_TYPE_ASCONF:
1775 		if ((stcb == NULL) || (inp == NULL)) {
1776 			break;
1777 		}
1778 		if (sctp_asconf_timer(inp, stcb, net)) {
1779 			/* no need to unlock the tcb, it's gone */
1780 			goto out_decr;
1781 		}
1782 		SCTP_STAT_INCR(sctps_timoasconf);
1783 #ifdef SCTP_AUDITING_ENABLED
1784 		sctp_auditing(4, inp, stcb, net);
1785 #endif
1786 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1787 		break;
1788 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1789 		if ((stcb == NULL) || (inp == NULL)) {
1790 			break;
1791 		}
1792 		sctp_delete_prim_timer(inp, stcb, net);
1793 		SCTP_STAT_INCR(sctps_timodelprim);
1794 		break;
1795 
1796 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1797 		if ((stcb == NULL) || (inp == NULL)) {
1798 			break;
1799 		}
1800 		SCTP_STAT_INCR(sctps_timoautoclose);
1801 		sctp_autoclose_timer(inp, stcb, net);
1802 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1803 		did_output = 0;
1804 		break;
1805 	case SCTP_TIMER_TYPE_ASOCKILL:
1806 		if ((stcb == NULL) || (inp == NULL)) {
1807 			break;
1808 		}
1809 		SCTP_STAT_INCR(sctps_timoassockill);
1810 		/* Can we free it yet? */
1811 		SCTP_INP_DECR_REF(inp);
1812 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1813 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1814 		so = SCTP_INP_SO(inp);
1815 		atomic_add_int(&stcb->asoc.refcnt, 1);
1816 		SCTP_TCB_UNLOCK(stcb);
1817 		SCTP_SOCKET_LOCK(so, 1);
1818 		SCTP_TCB_LOCK(stcb);
1819 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1820 #endif
1821 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1822 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1823 		SCTP_SOCKET_UNLOCK(so, 1);
1824 #endif
1825 		/*
1826 		 * free asoc always unlocks (or destroys) it, so prevent a
1827 		 * duplicate unlock or an unlock of a freed mutex :-0
1828 		 */
1829 		stcb = NULL;
1830 		goto out_no_decr;
1831 	case SCTP_TIMER_TYPE_INPKILL:
1832 		SCTP_STAT_INCR(sctps_timoinpkill);
1833 		if (inp == NULL) {
1834 			break;
1835 		}
1836 		/*
1837 		 * special case, take away our increment since WE are the
1838 		 * killer
1839 		 */
1840 		SCTP_INP_DECR_REF(inp);
1841 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1842 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1843 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1844 		inp = NULL;
1845 		goto out_no_decr;
1846 	default:
1847 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1848 		    tmr->type);
1849 		break;
1850 	}
1851 #ifdef SCTP_AUDITING_ENABLED
1852 	sctp_audit_log(0xF1, (uint8_t) tmr->type);
1853 	if (inp)
1854 		sctp_auditing(5, inp, stcb, net);
1855 #endif
1856 	if ((did_output) && stcb) {
1857 		/*
1858 		 * Now we need to clean up the control chunk chain if an
1859 		 * ECNE is on it. It must be marked as UNSENT again so the
1860 		 * next call will continue to send it until we get a CWR
1861 		 * that removes it. It is, however, unlikely that we will
1862 		 * find an ECN echo on the chain.
1863 		 */
1864 		sctp_fix_ecn_echo(&stcb->asoc);
1865 	}
1866 get_out:
1867 	if (stcb) {
1868 		SCTP_TCB_UNLOCK(stcb);
1869 	}
1870 out_decr:
1871 	if (inp) {
1872 		SCTP_INP_DECR_REF(inp);
1873 	}
1874 out_no_decr:
1875 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
1876 	    type);
1877 	CURVNET_RESTORE();
1878 }
1879 
1880 void
1881 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1882     struct sctp_nets *net)
1883 {
1884 	uint32_t to_ticks;
1885 	struct sctp_timer *tmr;
1886 
1887 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1888 		return;
1889 
1890 	tmr = NULL;
1891 	if (stcb) {
1892 		SCTP_TCB_LOCK_ASSERT(stcb);
1893 	}
1894 	switch (t_type) {
1895 	case SCTP_TIMER_TYPE_ZERO_COPY:
1896 		tmr = &inp->sctp_ep.zero_copy_timer;
1897 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1898 		break;
1899 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1900 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1901 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1902 		break;
1903 	case SCTP_TIMER_TYPE_ADDR_WQ:
1904 		/* Only 1 tick away :-) */
1905 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1906 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1907 		break;
1908 	case SCTP_TIMER_TYPE_SEND:
1909 		/* Here we use the RTO timer */
1910 		{
1911 			int rto_val;
1912 
1913 			if ((stcb == NULL) || (net == NULL)) {
1914 				return;
1915 			}
1916 			tmr = &net->rxt_timer;
1917 			if (net->RTO == 0) {
1918 				rto_val = stcb->asoc.initial_rto;
1919 			} else {
1920 				rto_val = net->RTO;
1921 			}
1922 			to_ticks = MSEC_TO_TICKS(rto_val);
1923 		}
1924 		break;
1925 	case SCTP_TIMER_TYPE_INIT:
1926 		/*
1927 		 * Here we use the INIT timer default, usually about 1
1928 		 * minute.
1929 		 */
1930 		if ((stcb == NULL) || (net == NULL)) {
1931 			return;
1932 		}
1933 		tmr = &net->rxt_timer;
1934 		if (net->RTO == 0) {
1935 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1936 		} else {
1937 			to_ticks = MSEC_TO_TICKS(net->RTO);
1938 		}
1939 		break;
1940 	case SCTP_TIMER_TYPE_RECV:
1941 		/*
1942 		 * Here we use the Delayed-Ack timer value from the inp,
1943 		 * usually about 200ms.
1944 		 */
1945 		if (stcb == NULL) {
1946 			return;
1947 		}
1948 		tmr = &stcb->asoc.dack_timer;
1949 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1950 		break;
1951 	case SCTP_TIMER_TYPE_SHUTDOWN:
1952 		/* Here we use the RTO of the destination. */
1953 		if ((stcb == NULL) || (net == NULL)) {
1954 			return;
1955 		}
1956 		if (net->RTO == 0) {
1957 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1958 		} else {
1959 			to_ticks = MSEC_TO_TICKS(net->RTO);
1960 		}
1961 		tmr = &net->rxt_timer;
1962 		break;
1963 	case SCTP_TIMER_TYPE_HEARTBEAT:
1964 		/*
1965 		 * The net is used here so that we can add in the RTO, even
1966 		 * though we use a different timer. We also add the HB timer
1967 		 * delay PLUS a random jitter.
1968 		 */
1969 		if ((stcb == NULL) || (net == NULL)) {
1970 			return;
1971 		} else {
1972 			uint32_t rndval;
1973 			uint32_t jitter;
1974 
1975 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1976 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1977 				return;
1978 			}
1979 			if (net->RTO == 0) {
1980 				to_ticks = stcb->asoc.initial_rto;
1981 			} else {
1982 				to_ticks = net->RTO;
1983 			}
1984 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1985 			jitter = rndval % to_ticks;
1986 			if (jitter >= (to_ticks >> 1)) {
1987 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1988 			} else {
1989 				to_ticks = to_ticks - jitter;
1990 			}
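			/*
			 * Since jitter = rndval % to_ticks, the adjustment
			 * above spreads the timeout roughly uniformly over
			 * [RTO/2, 3*RTO/2).
			 */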
1991 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1992 			    !(net->dest_state & SCTP_ADDR_PF)) {
1993 				to_ticks += net->heart_beat_delay;
1994 			}
1995 			/*
1996 			 * Now convert to_ticks, which is currently in ms,
1997 			 * to ticks.
1998 			 */
1999 			to_ticks = MSEC_TO_TICKS(to_ticks);
2000 			tmr = &net->hb_timer;
2001 		}
2002 		break;
2003 	case SCTP_TIMER_TYPE_COOKIE:
2004 		/*
2005 		 * Here we can use the RTO timer from the network since one
2006 		 * RTT was complete. If a retransmission happened, then we
2007 		 * will be using the initial RTO value.
2008 		 */
2009 		if ((stcb == NULL) || (net == NULL)) {
2010 			return;
2011 		}
2012 		if (net->RTO == 0) {
2013 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2014 		} else {
2015 			to_ticks = MSEC_TO_TICKS(net->RTO);
2016 		}
2017 		tmr = &net->rxt_timer;
2018 		break;
2019 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2020 		/*
2021 		 * Nothing needed but the endpoint here; usually about 60
2022 		 * minutes.
2023 		 */
2024 		tmr = &inp->sctp_ep.signature_change;
2025 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2026 		break;
2027 	case SCTP_TIMER_TYPE_ASOCKILL:
2028 		if (stcb == NULL) {
2029 			return;
2030 		}
2031 		tmr = &stcb->asoc.strreset_timer;
2032 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2033 		break;
2034 	case SCTP_TIMER_TYPE_INPKILL:
2035 		/*
2036 		 * The inp is set up to die. We re-use the signature_change
2037 		 * timer since that has stopped and we are in the GONE
2038 		 * state.
2039 		 */
2040 		tmr = &inp->sctp_ep.signature_change;
2041 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2042 		break;
2043 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2044 		/*
2045 		 * Here we use the value found in the EP for PMTU, usually
2046 		 * about 10 minutes.
2047 		 */
2048 		if ((stcb == NULL) || (net == NULL)) {
2049 			return;
2050 		}
2051 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2052 			return;
2053 		}
2054 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2055 		tmr = &net->pmtu_timer;
2056 		break;
2057 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2058 		/* Here we use the RTO of the destination */
2059 		if ((stcb == NULL) || (net == NULL)) {
2060 			return;
2061 		}
2062 		if (net->RTO == 0) {
2063 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2064 		} else {
2065 			to_ticks = MSEC_TO_TICKS(net->RTO);
2066 		}
2067 		tmr = &net->rxt_timer;
2068 		break;
2069 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2070 		/*
2071 		 * Here we use the endpoint's shutdown guard timer, usually
2072 		 * about 3 minutes.
2073 		 */
2074 		if (stcb == NULL) {
2075 			return;
2076 		}
2077 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2078 		tmr = &stcb->asoc.shut_guard_timer;
2079 		break;
2080 	case SCTP_TIMER_TYPE_STRRESET:
2081 		/*
2082 		 * Here the timer comes from the stcb but its value is from
2083 		 * the net's RTO.
2084 		 */
2085 		if ((stcb == NULL) || (net == NULL)) {
2086 			return;
2087 		}
2088 		if (net->RTO == 0) {
2089 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2090 		} else {
2091 			to_ticks = MSEC_TO_TICKS(net->RTO);
2092 		}
2093 		tmr = &stcb->asoc.strreset_timer;
2094 		break;
2095 	case SCTP_TIMER_TYPE_ASCONF:
2096 		/*
2097 		 * Here the timer comes from the stcb but its value is from
2098 		 * the net's RTO.
2099 		 */
2100 		if ((stcb == NULL) || (net == NULL)) {
2101 			return;
2102 		}
2103 		if (net->RTO == 0) {
2104 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2105 		} else {
2106 			to_ticks = MSEC_TO_TICKS(net->RTO);
2107 		}
2108 		tmr = &stcb->asoc.asconf_timer;
2109 		break;
2110 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2111 		if ((stcb == NULL) || (net != NULL)) {
2112 			return;
2113 		}
2114 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2115 		tmr = &stcb->asoc.delete_prim_timer;
2116 		break;
2117 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2118 		if (stcb == NULL) {
2119 			return;
2120 		}
2121 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2122 			/*
2123 			 * Really an error since stcb is NOT set to
2124 			 * autoclose
2125 			 */
2126 			return;
2127 		}
2128 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2129 		tmr = &stcb->asoc.autoclose_timer;
2130 		break;
2131 	default:
2132 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2133 		    __FUNCTION__, t_type);
2134 		return;
2135 		break;
2136 	}
2137 	if ((to_ticks <= 0) || (tmr == NULL)) {
2138 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2139 		    __FUNCTION__, t_type, to_ticks, (void *)tmr);
2140 		return;
2141 	}
2142 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2143 		/*
2144 		 * We do NOT allow the timer to already be running. If it
2145 		 * is, we leave the current one up unchanged.
2146 		 */
2147 		return;
2148 	}
2149 	/* At this point we can proceed */
2150 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2151 		stcb->asoc.num_send_timers_up++;
2152 	}
2153 	tmr->stopped_from = 0;
2154 	tmr->type = t_type;
2155 	tmr->ep = (void *)inp;
2156 	tmr->tcb = (void *)stcb;
2157 	tmr->net = (void *)net;
2158 	tmr->self = (void *)tmr;
2159 	tmr->vnet = (void *)curvnet;
2160 	tmr->ticks = sctp_get_tick_count();
2161 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2162 	return;
2163 }
2164 
2165 void
2166 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2167     struct sctp_nets *net, uint32_t from)
2168 {
2169 	struct sctp_timer *tmr;
2170 
2171 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2172 	    (inp == NULL))
2173 		return;
2174 
2175 	tmr = NULL;
2176 	if (stcb) {
2177 		SCTP_TCB_LOCK_ASSERT(stcb);
2178 	}
2179 	switch (t_type) {
2180 	case SCTP_TIMER_TYPE_ZERO_COPY:
2181 		tmr = &inp->sctp_ep.zero_copy_timer;
2182 		break;
2183 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2184 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2185 		break;
2186 	case SCTP_TIMER_TYPE_ADDR_WQ:
2187 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2188 		break;
2189 	case SCTP_TIMER_TYPE_SEND:
2190 		if ((stcb == NULL) || (net == NULL)) {
2191 			return;
2192 		}
2193 		tmr = &net->rxt_timer;
2194 		break;
2195 	case SCTP_TIMER_TYPE_INIT:
2196 		if ((stcb == NULL) || (net == NULL)) {
2197 			return;
2198 		}
2199 		tmr = &net->rxt_timer;
2200 		break;
2201 	case SCTP_TIMER_TYPE_RECV:
2202 		if (stcb == NULL) {
2203 			return;
2204 		}
2205 		tmr = &stcb->asoc.dack_timer;
2206 		break;
2207 	case SCTP_TIMER_TYPE_SHUTDOWN:
2208 		if ((stcb == NULL) || (net == NULL)) {
2209 			return;
2210 		}
2211 		tmr = &net->rxt_timer;
2212 		break;
2213 	case SCTP_TIMER_TYPE_HEARTBEAT:
2214 		if ((stcb == NULL) || (net == NULL)) {
2215 			return;
2216 		}
2217 		tmr = &net->hb_timer;
2218 		break;
2219 	case SCTP_TIMER_TYPE_COOKIE:
2220 		if ((stcb == NULL) || (net == NULL)) {
2221 			return;
2222 		}
2223 		tmr = &net->rxt_timer;
2224 		break;
2225 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2226 		/* nothing needed but the endpoint here */
2227 		tmr = &inp->sctp_ep.signature_change;
2228 		/*
2229 		 * We re-use the newcookie timer for the INP kill timer. We
2230 		 * must ensure that we do not kill it by accident.
2231 		 */
2232 		break;
2233 	case SCTP_TIMER_TYPE_ASOCKILL:
2234 		/*
2235 		 * Stop the asoc kill timer.
2236 		 */
2237 		if (stcb == NULL) {
2238 			return;
2239 		}
2240 		tmr = &stcb->asoc.strreset_timer;
2241 		break;
2242 
2243 	case SCTP_TIMER_TYPE_INPKILL:
2244 		/*
2245 		 * The inp is set up to die. We re-use the signature_change
2246 		 * timer since that has stopped and we are in the GONE
2247 		 * state.
2248 		 */
2249 		tmr = &inp->sctp_ep.signature_change;
2250 		break;
2251 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2252 		if ((stcb == NULL) || (net == NULL)) {
2253 			return;
2254 		}
2255 		tmr = &net->pmtu_timer;
2256 		break;
2257 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2258 		if ((stcb == NULL) || (net == NULL)) {
2259 			return;
2260 		}
2261 		tmr = &net->rxt_timer;
2262 		break;
2263 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2264 		if (stcb == NULL) {
2265 			return;
2266 		}
2267 		tmr = &stcb->asoc.shut_guard_timer;
2268 		break;
2269 	case SCTP_TIMER_TYPE_STRRESET:
2270 		if (stcb == NULL) {
2271 			return;
2272 		}
2273 		tmr = &stcb->asoc.strreset_timer;
2274 		break;
2275 	case SCTP_TIMER_TYPE_ASCONF:
2276 		if (stcb == NULL) {
2277 			return;
2278 		}
2279 		tmr = &stcb->asoc.asconf_timer;
2280 		break;
2281 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2282 		if (stcb == NULL) {
2283 			return;
2284 		}
2285 		tmr = &stcb->asoc.delete_prim_timer;
2286 		break;
2287 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2288 		if (stcb == NULL) {
2289 			return;
2290 		}
2291 		tmr = &stcb->asoc.autoclose_timer;
2292 		break;
2293 	default:
2294 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2295 		    __FUNCTION__, t_type);
2296 		break;
2297 	}
2298 	if (tmr == NULL) {
2299 		return;
2300 	}
2301 	if ((tmr->type != t_type) && tmr->type) {
2302 		/*
2303 		 * Ok, we have a timer that is under joint use, for example
2304 		 * the cookie timer sharing a slot with the SEND timer. We
2305 		 * are therefore NOT running the timer that the caller wants
2306 		 * stopped, so just return.
2307 		 */
2308 		return;
2309 	}
2310 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2311 		stcb->asoc.num_send_timers_up--;
2312 		if (stcb->asoc.num_send_timers_up < 0) {
2313 			stcb->asoc.num_send_timers_up = 0;
2314 		}
2315 	}
2316 	tmr->self = NULL;
2317 	tmr->stopped_from = from;
2318 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2319 	return;
2320 }
2321 
2322 uint32_t
2323 sctp_calculate_len(struct mbuf *m)
2324 {
2325 	uint32_t tlen = 0;
2326 	struct mbuf *at;
2327 
2328 	at = m;
2329 	while (at) {
2330 		tlen += SCTP_BUF_LEN(at);
2331 		at = SCTP_BUF_NEXT(at);
2332 	}
2333 	return (tlen);
2334 }
2335 
2336 void
2337 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2338     struct sctp_association *asoc, uint32_t mtu)
2339 {
2340 	/*
2341 	 * Reset the P-MTU size on this association. This involves changing
2342 	 * the asoc MTU and going through ANY chunk+overhead larger than mtu
2343 	 * to allow the DF flag to be cleared.
2344 	 */
2345 	struct sctp_tmit_chunk *chk;
2346 	unsigned int eff_mtu, ovh;
2347 
2348 	asoc->smallest_mtu = mtu;
2349 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2350 		ovh = SCTP_MIN_OVERHEAD;
2351 	} else {
2352 		ovh = SCTP_MIN_V4_OVERHEAD;
2353 	}
2354 	eff_mtu = mtu - ovh;
2355 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2356 		if (chk->send_size > eff_mtu) {
2357 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2358 		}
2359 	}
2360 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2361 		if (chk->send_size > eff_mtu) {
2362 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2363 		}
2364 	}
2365 }
2366 
2367 
2368 /*
2369  * Given an association and the starting time of the current RTT period,
2370  * return the RTO in msecs. net should point to the current network.
2371  */
2372 
2373 uint32_t
2374 sctp_calculate_rto(struct sctp_tcb *stcb,
2375     struct sctp_association *asoc,
2376     struct sctp_nets *net,
2377     struct timeval *told,
2378     int safe, int rtt_from_sack)
2379 {
2380 	/*-
2381 	 * given an association and the starting time of the current RTT
2382 	 * period (in value1/value2) return RTO in number of msecs.
2383 	 */
2384 	int32_t rtt;		/* RTT in ms */
2385 	uint32_t new_rto;
2386 	int first_measure = 0;
2387 	struct timeval now, then, *old;
2388 
2389 	/* Copy it out for sparc64 */
2390 	if (safe == sctp_align_unsafe_makecopy) {
2391 		old = &then;
2392 		memcpy(&then, told, sizeof(struct timeval));
2393 	} else if (safe == sctp_align_safe_nocopy) {
2394 		old = told;
2395 	} else {
2396 		/* error */
2397 		SCTP_PRINTF("Huh, bad rto calc call\n");
2398 		return (0);
2399 	}
2400 	/************************/
2401 	/* 1. calculate new RTT */
2402 	/************************/
2403 	/* get the current time */
2404 	if (stcb->asoc.use_precise_time) {
2405 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2406 	} else {
2407 		(void)SCTP_GETTIME_TIMEVAL(&now);
2408 	}
2409 	timevalsub(&now, old);
2410 	/* store the current RTT in microseconds */
2411 	net->rtt = (uint64_t) 1000000 * (uint64_t) now.tv_sec +
2412 	    (uint64_t) now.tv_usec;
2413 
2414 	/* compute rtt in ms */
2415 	rtt = (int32_t) (net->rtt / 1000);
2416 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2417 		/*
2418 		 * Tell the CC module that a new update has just occurred
2419 		 * from a sack
2420 		 */
2421 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2422 	}
2423 	/*
2424 	 * Do we need to determine the LAN type? We do this only on SACKs,
2425 	 * i.e. RTT determined from data, not non-data (HB/INIT->INITACK).
2426 	 */
2427 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2428 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2429 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2430 			net->lan_type = SCTP_LAN_INTERNET;
2431 		} else {
2432 			net->lan_type = SCTP_LAN_LOCAL;
2433 		}
2434 	}
2435 	/***************************/
2436 	/* 2. update RTTVAR & SRTT */
2437 	/***************************/
2438 	/*-
2439 	 * Compute the scaled average lastsa and the
2440 	 * scaled variance lastsv as described in Van Jacobson's
2441 	 * paper "Congestion Avoidance and Control", Annex A.
2442 	 *
2443 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2444 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2445 	 */
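	/*-
	 * With err = rtt - srtt (using the srtt before the update), the
	 * code below implements:
	 *   srtt   += err / 2^SCTP_RTT_SHIFT
	 *   rttvar += (|err| - rttvar) / 2^SCTP_RTT_VAR_SHIFT
	 * with rtt measured in milliseconds.
	 */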
2446 	if (net->RTO_measured) {
2447 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2448 		net->lastsa += rtt;
2449 		if (rtt < 0) {
2450 			rtt = -rtt;
2451 		}
2452 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2453 		net->lastsv += rtt;
2454 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2455 			rto_logging(net, SCTP_LOG_RTTVAR);
2456 		}
2457 	} else {
2458 		/* First RTO measurement */
2459 		net->RTO_measured = 1;
2460 		first_measure = 1;
2461 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2462 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2463 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2464 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2465 		}
2466 	}
2467 	if (net->lastsv == 0) {
2468 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2469 	}
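	/* RTO = srtt + scaled rttvar, i.e. srtt + (rttvar << SCTP_RTT_VAR_SHIFT). */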
2470 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2471 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2472 	    (stcb->asoc.sat_network_lockout == 0)) {
2473 		stcb->asoc.sat_network = 1;
2474 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2475 		stcb->asoc.sat_network = 0;
2476 		stcb->asoc.sat_network_lockout = 1;
2477 	}
2478 	/* bound it, per C6/C7 in Section 5.3.1 */
2479 	if (new_rto < stcb->asoc.minrto) {
2480 		new_rto = stcb->asoc.minrto;
2481 	}
2482 	if (new_rto > stcb->asoc.maxrto) {
2483 		new_rto = stcb->asoc.maxrto;
2484 	}
2485 	/* we are now returning the RTO */
2486 	return (new_rto);
2487 }
2488 
2489 /*
2490  * return a pointer to a contiguous piece of data from the given mbuf chain
2491  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2492  * one mbuf, a copy is made at 'ptr'. The caller must ensure that the buffer
2493  * size is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
2494  */
2495 caddr_t
2496 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2497 {
2498 	uint32_t count;
2499 	uint8_t *ptr;
2500 
2501 	ptr = in_ptr;
2502 	if ((off < 0) || (len <= 0))
2503 		return (NULL);
2504 
2505 	/* find the desired start location */
2506 	while ((m != NULL) && (off > 0)) {
2507 		if (off < SCTP_BUF_LEN(m))
2508 			break;
2509 		off -= SCTP_BUF_LEN(m);
2510 		m = SCTP_BUF_NEXT(m);
2511 	}
2512 	if (m == NULL)
2513 		return (NULL);
2514 
2515 	/* is the current mbuf large enough (i.e. contiguous)? */
2516 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2517 		return (mtod(m, caddr_t)+off);
2518 	} else {
2519 		/* else, it spans more than one mbuf, so save a temp copy... */
2520 		while ((m != NULL) && (len > 0)) {
2521 			count = min(SCTP_BUF_LEN(m) - off, len);
2522 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2523 			len -= count;
2524 			ptr += count;
2525 			off = 0;
2526 			m = SCTP_BUF_NEXT(m);
2527 		}
2528 		if ((m == NULL) && (len > 0))
2529 			return (NULL);
2530 		else
2531 			return ((caddr_t)in_ptr);
2532 	}
2533 }
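
/*
 * Illustrative use of sctp_m_getptr(): pull a parameter header out of a
 * chain at 'offset', falling back to a caller-provided buffer when the
 * header straddles mbufs (this is essentially what sctp_get_next_param()
 * below does):
 *
 *	struct sctp_paramhdr buf, *ph;
 *
 *	ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
 *	    sizeof(struct sctp_paramhdr), (uint8_t *)&buf);
 *	if (ph == NULL)
 *		return;
 */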
2534 
2535 
2536 
2537 struct sctp_paramhdr *
2538 sctp_get_next_param(struct mbuf *m,
2539     int offset,
2540     struct sctp_paramhdr *pull,
2541     int pull_limit)
2542 {
2543 	/* This just provides a typed signature to Peter's Pull routine */
2544 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2545 	    (uint8_t *) pull));
2546 }
2547 
2548 
2549 struct mbuf *
2550 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2551 {
2552 	struct mbuf *m_last;
2553 	caddr_t dp;
2554 
2555 	if (padlen > 3) {
2556 		return (NULL);
2557 	}
2558 	if (padlen <= M_TRAILINGSPACE(m)) {
2559 		/*
2560 		 * The easy way. We hope the majority of the time we hit
2561 		 * here :)
2562 		 */
2563 		m_last = m;
2564 	} else {
2565 		/* Hard way: we must grow the mbuf chain */
2566 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2567 		if (m_last == NULL) {
2568 			return (NULL);
2569 		}
2570 		SCTP_BUF_LEN(m_last) = 0;
2571 		SCTP_BUF_NEXT(m_last) = NULL;
2572 		SCTP_BUF_NEXT(m) = m_last;
2573 	}
2574 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2575 	SCTP_BUF_LEN(m_last) += padlen;
2576 	memset(dp, 0, padlen);
2577 	return (m_last);
2578 }
2579 
2580 struct mbuf *
2581 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2582 {
2583 	/* find the last mbuf in chain and pad it */
2584 	struct mbuf *m_at;
2585 
2586 	if (last_mbuf != NULL) {
2587 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2588 	} else {
2589 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2590 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2591 				return (sctp_add_pad_tombuf(m_at, padval));
2592 			}
2593 		}
2594 	}
2595 	return (NULL);
2596 }
2597 
2598 static void
2599 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2600     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2601 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2602     SCTP_UNUSED
2603 #endif
2604 )
2605 {
2606 	struct mbuf *m_notify;
2607 	struct sctp_assoc_change *sac;
2608 	struct sctp_queued_to_read *control;
2609 	size_t notif_len, abort_len;
2610 	unsigned int i;
2611 
2612 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2613 	struct socket *so;
2614 
2615 #endif
2616 
2617 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2618 		notif_len = sizeof(struct sctp_assoc_change);
2619 		if (abort != NULL) {
2620 			abort_len = ntohs(abort->ch.chunk_length);
2621 		} else {
2622 			abort_len = 0;
2623 		}
2624 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2625 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2626 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2627 			notif_len += abort_len;
2628 		}
2629 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2630 		if (m_notify == NULL) {
2631 			/* Retry with smaller value. */
2632 			notif_len = sizeof(struct sctp_assoc_change);
2633 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2634 			if (m_notify == NULL) {
2635 				goto set_error;
2636 			}
2637 		}
2638 		SCTP_BUF_NEXT(m_notify) = NULL;
2639 		sac = mtod(m_notify, struct sctp_assoc_change *);
2640 		memset(sac, 0, notif_len);
2641 		sac->sac_type = SCTP_ASSOC_CHANGE;
2642 		sac->sac_flags = 0;
2643 		sac->sac_length = sizeof(struct sctp_assoc_change);
2644 		sac->sac_state = state;
2645 		sac->sac_error = error;
2646 		/* XXX verify these stream counts */
2647 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2648 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2649 		sac->sac_assoc_id = sctp_get_associd(stcb);
2650 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2651 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2652 				i = 0;
2653 				if (stcb->asoc.prsctp_supported == 1) {
2654 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2655 				}
2656 				if (stcb->asoc.auth_supported == 1) {
2657 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2658 				}
2659 				if (stcb->asoc.asconf_supported == 1) {
2660 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2661 				}
2662 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2663 				if (stcb->asoc.reconfig_supported == 1) {
2664 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2665 				}
2666 				sac->sac_length += i;
2667 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2668 				memcpy(sac->sac_info, abort, abort_len);
2669 				sac->sac_length += abort_len;
2670 			}
2671 		}
2672 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2673 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2674 		    0, 0, stcb->asoc.context, 0, 0, 0,
2675 		    m_notify);
2676 		if (control != NULL) {
2677 			control->length = SCTP_BUF_LEN(m_notify);
2678 			/* not that we need this */
2679 			control->tail_mbuf = m_notify;
2680 			control->spec_flags = M_NOTIFICATION;
2681 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2682 			    control,
2683 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2684 			    so_locked);
2685 		} else {
2686 			sctp_m_freem(m_notify);
2687 		}
2688 	}
2689 	/*
2690 	 * For 1-to-1 style sockets, we send up an error when an ABORT
2691 	 * comes in.
2692 	 */
2693 set_error:
2694 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2695 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2696 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2697 		SOCK_LOCK(stcb->sctp_socket);
2698 		if (from_peer) {
2699 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2700 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2701 				stcb->sctp_socket->so_error = ECONNREFUSED;
2702 			} else {
2703 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2704 				stcb->sctp_socket->so_error = ECONNRESET;
2705 			}
2706 		} else {
2707 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2708 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2709 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2710 				stcb->sctp_socket->so_error = ETIMEDOUT;
2711 			} else {
2712 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2713 				stcb->sctp_socket->so_error = ECONNABORTED;
2714 			}
2715 		}
2716 	}
2717 	/* Wake ANY sleepers */
2718 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2719 	so = SCTP_INP_SO(stcb->sctp_ep);
2720 	if (!so_locked) {
2721 		atomic_add_int(&stcb->asoc.refcnt, 1);
2722 		SCTP_TCB_UNLOCK(stcb);
2723 		SCTP_SOCKET_LOCK(so, 1);
2724 		SCTP_TCB_LOCK(stcb);
2725 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2726 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2727 			SCTP_SOCKET_UNLOCK(so, 1);
2728 			return;
2729 		}
2730 	}
2731 #endif
2732 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2733 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2734 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2735 		socantrcvmore_locked(stcb->sctp_socket);
2736 	}
2737 	sorwakeup(stcb->sctp_socket);
2738 	sowwakeup(stcb->sctp_socket);
2739 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2740 	if (!so_locked) {
2741 		SCTP_SOCKET_UNLOCK(so, 1);
2742 	}
2743 #endif
2744 }
2745 
2746 static void
2747 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2748     struct sockaddr *sa, uint32_t error, int so_locked
2749 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2750     SCTP_UNUSED
2751 #endif
2752 )
2753 {
2754 	struct mbuf *m_notify;
2755 	struct sctp_paddr_change *spc;
2756 	struct sctp_queued_to_read *control;
2757 
2758 	if ((stcb == NULL) ||
2759 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2760 		/* event not enabled */
2761 		return;
2762 	}
2763 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2764 	if (m_notify == NULL)
2765 		return;
2766 	SCTP_BUF_LEN(m_notify) = 0;
2767 	spc = mtod(m_notify, struct sctp_paddr_change *);
2768 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2769 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2770 	spc->spc_flags = 0;
2771 	spc->spc_length = sizeof(struct sctp_paddr_change);
2772 	switch (sa->sa_family) {
2773 #ifdef INET
2774 	case AF_INET:
2775 #ifdef INET6
2776 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2777 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2778 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2779 		} else {
2780 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2781 		}
2782 #else
2783 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2784 #endif
2785 		break;
2786 #endif
2787 #ifdef INET6
2788 	case AF_INET6:
2789 		{
2790 			struct sockaddr_in6 *sin6;
2791 
2792 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2793 
2794 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2795 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2796 				if (sin6->sin6_scope_id == 0) {
2797 					/* recover scope_id for user */
2798 					(void)sa6_recoverscope(sin6);
2799 				} else {
2800 					/* clear embedded scope_id for user */
2801 					in6_clearscope(&sin6->sin6_addr);
2802 				}
2803 			}
2804 			break;
2805 		}
2806 #endif
2807 	default:
2808 		/* TSNH */
2809 		break;
2810 	}
2811 	spc->spc_state = state;
2812 	spc->spc_error = error;
2813 	spc->spc_assoc_id = sctp_get_associd(stcb);
2814 
2815 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2816 	SCTP_BUF_NEXT(m_notify) = NULL;
2817 
2818 	/* append to socket */
2819 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2820 	    0, 0, stcb->asoc.context, 0, 0, 0,
2821 	    m_notify);
2822 	if (control == NULL) {
2823 		/* no memory */
2824 		sctp_m_freem(m_notify);
2825 		return;
2826 	}
2827 	control->length = SCTP_BUF_LEN(m_notify);
2828 	control->spec_flags = M_NOTIFICATION;
2829 	/* not that we need this */
2830 	control->tail_mbuf = m_notify;
2831 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2832 	    control,
2833 	    &stcb->sctp_socket->so_rcv, 1,
2834 	    SCTP_READ_LOCK_NOT_HELD,
2835 	    so_locked);
2836 }
2837 
2838 
2839 static void
2840 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2841     struct sctp_tmit_chunk *chk, int so_locked
2842 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2843     SCTP_UNUSED
2844 #endif
2845 )
2846 {
2847 	struct mbuf *m_notify;
2848 	struct sctp_send_failed *ssf;
2849 	struct sctp_send_failed_event *ssfe;
2850 	struct sctp_queued_to_read *control;
2851 	int length;
2852 
2853 	if ((stcb == NULL) ||
2854 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2855 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2856 		/* event not enabled */
2857 		return;
2858 	}
2859 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2860 		length = sizeof(struct sctp_send_failed_event);
2861 	} else {
2862 		length = sizeof(struct sctp_send_failed);
2863 	}
2864 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
2865 	if (m_notify == NULL)
2866 		/* no space left */
2867 		return;
2868 	SCTP_BUF_LEN(m_notify) = 0;
2869 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2870 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2871 		memset(ssfe, 0, length);
2872 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2873 		if (sent) {
2874 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2875 		} else {
2876 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2877 		}
2878 		length += chk->send_size;
2879 		length -= sizeof(struct sctp_data_chunk);
2880 		ssfe->ssfe_length = length;
2881 		ssfe->ssfe_error = error;
2882 		/* not exactly what the user sent in, but should be close :) */
2883 		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
2884 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2885 		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
2886 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2887 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2888 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2889 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
2890 	} else {
2891 		ssf = mtod(m_notify, struct sctp_send_failed *);
2892 		memset(ssf, 0, length);
2893 		ssf->ssf_type = SCTP_SEND_FAILED;
2894 		if (sent) {
2895 			ssf->ssf_flags = SCTP_DATA_SENT;
2896 		} else {
2897 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2898 		}
2899 		length += chk->send_size;
2900 		length -= sizeof(struct sctp_data_chunk);
2901 		ssf->ssf_length = length;
2902 		ssf->ssf_error = error;
2903 		/* not exactly what the user sent in, but should be close :) */
2904 		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
2905 		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2906 		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2907 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2908 		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2909 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
2910 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2911 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
2912 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2913 	}
2914 	if (chk->data) {
2915 		/*
2916 		 * trim off the sctp chunk header (it should be there)
2917 		 */
2918 		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
2919 			m_adj(chk->data, sizeof(struct sctp_data_chunk));
2920 			sctp_mbuf_crush(chk->data);
2921 			chk->send_size -= sizeof(struct sctp_data_chunk);
2922 		}
2923 	}
2924 	SCTP_BUF_NEXT(m_notify) = chk->data;
2925 	/* Steal off the mbuf */
2926 	chk->data = NULL;
2927 	/*
2928 	 * For this case, we check the actual socket buffer, since the assoc
2929 	 * is going away and we don't want to overfill the socket buffer for
2930 	 * a non-reader.
2931 	 */
2932 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
2933 		sctp_m_freem(m_notify);
2934 		return;
2935 	}
2936 	/* append to socket */
2937 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2938 	    0, 0, stcb->asoc.context, 0, 0, 0,
2939 	    m_notify);
2940 	if (control == NULL) {
2941 		/* no memory */
2942 		sctp_m_freem(m_notify);
2943 		return;
2944 	}
2945 	control->spec_flags = M_NOTIFICATION;
2946 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2947 	    control,
2948 	    &stcb->sctp_socket->so_rcv, 1,
2949 	    SCTP_READ_LOCK_NOT_HELD,
2950 	    so_locked);
2951 }
2952 
2953 
2954 static void
2955 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
2956     struct sctp_stream_queue_pending *sp, int so_locked
2957 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2958     SCTP_UNUSED
2959 #endif
2960 )
2961 {
2962 	struct mbuf *m_notify;
2963 	struct sctp_send_failed *ssf;
2964 	struct sctp_send_failed_event *ssfe;
2965 	struct sctp_queued_to_read *control;
2966 	int length;
2967 
2968 	if ((stcb == NULL) ||
2969 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2970 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2971 		/* event not enabled */
2972 		return;
2973 	}
2974 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2975 		length = sizeof(struct sctp_send_failed_event);
2976 	} else {
2977 		length = sizeof(struct sctp_send_failed);
2978 	}
2979 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
2980 	if (m_notify == NULL) {
2981 		/* no space left */
2982 		return;
2983 	}
2984 	SCTP_BUF_LEN(m_notify) = 0;
2985 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2986 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2987 		memset(ssfe, 0, length);
2988 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2989 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2990 		length += sp->length;
2991 		ssfe->ssfe_length = length;
2992 		ssfe->ssfe_error = error;
2993 		/* not exactly what the user sent in, but should be close :) */
2994 		ssfe->ssfe_info.snd_sid = sp->stream;
2995 		if (sp->some_taken) {
2996 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
2997 		} else {
2998 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
2999 		}
3000 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3001 		ssfe->ssfe_info.snd_context = sp->context;
3002 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3003 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3004 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
3005 	} else {
3006 		ssf = mtod(m_notify, struct sctp_send_failed *);
3007 		memset(ssf, 0, length);
3008 		ssf->ssf_type = SCTP_SEND_FAILED;
3009 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3010 		length += sp->length;
3011 		ssf->ssf_length = length;
3012 		ssf->ssf_error = error;
3013 		/* not exactly what the user sent in, but should be close :) */
3014 		ssf->ssf_info.sinfo_stream = sp->stream;
3015 		ssf->ssf_info.sinfo_ssn = 0;
3016 		if (sp->some_taken) {
3017 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3018 		} else {
3019 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3020 		}
3021 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3022 		ssf->ssf_info.sinfo_context = sp->context;
3023 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3024 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3025 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3026 	}
3027 	SCTP_BUF_NEXT(m_notify) = sp->data;
3028 
3029 	/* Steal off the mbuf */
3030 	sp->data = NULL;
3031 	/*
3032 	 * For this case, we check the actual socket buffer, since the assoc
3033 	 * is going away and we don't want to overfill the socket buffer for
3034 	 * a non-reader.
3035 	 */
3036 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3037 		sctp_m_freem(m_notify);
3038 		return;
3039 	}
3040 	/* append to socket */
3041 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3042 	    0, 0, stcb->asoc.context, 0, 0, 0,
3043 	    m_notify);
3044 	if (control == NULL) {
3045 		/* no memory */
3046 		sctp_m_freem(m_notify);
3047 		return;
3048 	}
3049 	control->spec_flags = M_NOTIFICATION;
3050 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3051 	    control,
3052 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3053 }
3054 
3055 
3056 
3057 static void
3058 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3059 {
3060 	struct mbuf *m_notify;
3061 	struct sctp_adaptation_event *sai;
3062 	struct sctp_queued_to_read *control;
3063 
3064 	if ((stcb == NULL) ||
3065 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3066 		/* event not enabled */
3067 		return;
3068 	}
3069 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_NOWAIT, 1, MT_DATA);
3070 	if (m_notify == NULL)
3071 		/* no space left */
3072 		return;
3073 	SCTP_BUF_LEN(m_notify) = 0;
3074 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3075 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3076 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3077 	sai->sai_flags = 0;
3078 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3079 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3080 	sai->sai_assoc_id = sctp_get_associd(stcb);
3081 
3082 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3083 	SCTP_BUF_NEXT(m_notify) = NULL;
3084 
3085 	/* append to socket */
3086 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3087 	    0, 0, stcb->asoc.context, 0, 0, 0,
3088 	    m_notify);
3089 	if (control == NULL) {
3090 		/* no memory */
3091 		sctp_m_freem(m_notify);
3092 		return;
3093 	}
3094 	control->length = SCTP_BUF_LEN(m_notify);
3095 	control->spec_flags = M_NOTIFICATION;
3096 	/* not that we need this */
3097 	control->tail_mbuf = m_notify;
3098 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3099 	    control,
3100 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3101 }
3102 
3103 /* This always must be called with the read-queue LOCKED in the INP */
3104 static void
3105 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3106     uint32_t val, int so_locked
3107 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3108     SCTP_UNUSED
3109 #endif
3110 )
3111 {
3112 	struct mbuf *m_notify;
3113 	struct sctp_pdapi_event *pdapi;
3114 	struct sctp_queued_to_read *control;
3115 	struct sockbuf *sb;
3116 
3117 	if ((stcb == NULL) ||
3118 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3119 		/* event not enabled */
3120 		return;
3121 	}
3122 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3123 		return;
3124 	}
3125 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3126 	if (m_notify == NULL)
3127 		/* no space left */
3128 		return;
3129 	SCTP_BUF_LEN(m_notify) = 0;
3130 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3131 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3132 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3133 	pdapi->pdapi_flags = 0;
3134 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3135 	pdapi->pdapi_indication = error;
3136 	pdapi->pdapi_stream = (val >> 16);
3137 	pdapi->pdapi_seq = (val & 0x0000ffff);
3138 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3139 
3140 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3141 	SCTP_BUF_NEXT(m_notify) = NULL;
3142 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3143 	    0, 0, stcb->asoc.context, 0, 0, 0,
3144 	    m_notify);
3145 	if (control == NULL) {
3146 		/* no memory */
3147 		sctp_m_freem(m_notify);
3148 		return;
3149 	}
3150 	control->spec_flags = M_NOTIFICATION;
3151 	control->length = SCTP_BUF_LEN(m_notify);
3152 	/* not that we need this */
3153 	control->tail_mbuf = m_notify;
3154 	control->held_length = 0;
3155 	control->length = 0;
3156 	sb = &stcb->sctp_socket->so_rcv;
3157 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3158 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3159 	}
3160 	sctp_sballoc(stcb, sb, m_notify);
3161 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3162 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3163 	}
3164 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3165 	control->end_added = 1;
3166 	if (stcb->asoc.control_pdapi)
3167 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3168 	else {
3169 		/* we really should not see this case */
3170 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3171 	}
3172 	if (stcb->sctp_ep && stcb->sctp_socket) {
3173 		/* This should always be the case */
3174 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3175 		struct socket *so;
3176 
3177 		so = SCTP_INP_SO(stcb->sctp_ep);
3178 		if (!so_locked) {
3179 			atomic_add_int(&stcb->asoc.refcnt, 1);
3180 			SCTP_TCB_UNLOCK(stcb);
3181 			SCTP_SOCKET_LOCK(so, 1);
3182 			SCTP_TCB_LOCK(stcb);
3183 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3184 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3185 				SCTP_SOCKET_UNLOCK(so, 1);
3186 				return;
3187 			}
3188 		}
3189 #endif
3190 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3191 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3192 		if (!so_locked) {
3193 			SCTP_SOCKET_UNLOCK(so, 1);
3194 		}
3195 #endif
3196 	}
3197 }
3198 
3199 static void
3200 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3201 {
3202 	struct mbuf *m_notify;
3203 	struct sctp_shutdown_event *sse;
3204 	struct sctp_queued_to_read *control;
3205 
3206 	/*
3207 	 * For the TCP model AND UDP connected sockets we will send an
3208 	 * error up when a SHUTDOWN completes.
3209 	 */
3210 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3211 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3212 		/* mark socket closed for read/write and wakeup! */
3213 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3214 		struct socket *so;
3215 
3216 		so = SCTP_INP_SO(stcb->sctp_ep);
3217 		atomic_add_int(&stcb->asoc.refcnt, 1);
3218 		SCTP_TCB_UNLOCK(stcb);
3219 		SCTP_SOCKET_LOCK(so, 1);
3220 		SCTP_TCB_LOCK(stcb);
3221 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3222 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3223 			SCTP_SOCKET_UNLOCK(so, 1);
3224 			return;
3225 		}
3226 #endif
3227 		socantsendmore(stcb->sctp_socket);
3228 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3229 		SCTP_SOCKET_UNLOCK(so, 1);
3230 #endif
3231 	}
3232 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3233 		/* event not enabled */
3234 		return;
3235 	}
3236 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3237 	if (m_notify == NULL)
3238 		/* no space left */
3239 		return;
3240 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3241 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3242 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3243 	sse->sse_flags = 0;
3244 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3245 	sse->sse_assoc_id = sctp_get_associd(stcb);
3246 
3247 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3248 	SCTP_BUF_NEXT(m_notify) = NULL;
3249 
3250 	/* append to socket */
3251 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3252 	    0, 0, stcb->asoc.context, 0, 0, 0,
3253 	    m_notify);
3254 	if (control == NULL) {
3255 		/* no memory */
3256 		sctp_m_freem(m_notify);
3257 		return;
3258 	}
3259 	control->spec_flags = M_NOTIFICATION;
3260 	control->length = SCTP_BUF_LEN(m_notify);
3261 	/* not that we need this */
3262 	control->tail_mbuf = m_notify;
3263 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3264 	    control,
3265 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3266 }
3267 
3268 static void
3269 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3270     int so_locked
3271 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3272     SCTP_UNUSED
3273 #endif
3274 )
3275 {
3276 	struct mbuf *m_notify;
3277 	struct sctp_sender_dry_event *event;
3278 	struct sctp_queued_to_read *control;
3279 
3280 	if ((stcb == NULL) ||
3281 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3282 		/* event not enabled */
3283 		return;
3284 	}
3285 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3286 	if (m_notify == NULL) {
3287 		/* no space left */
3288 		return;
3289 	}
3290 	SCTP_BUF_LEN(m_notify) = 0;
3291 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3292 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3293 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3294 	event->sender_dry_flags = 0;
3295 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3296 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3297 
3298 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3299 	SCTP_BUF_NEXT(m_notify) = NULL;
3300 
3301 	/* append to socket */
3302 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3303 	    0, 0, stcb->asoc.context, 0, 0, 0,
3304 	    m_notify);
3305 	if (control == NULL) {
3306 		/* no memory */
3307 		sctp_m_freem(m_notify);
3308 		return;
3309 	}
3310 	control->length = SCTP_BUF_LEN(m_notify);
3311 	control->spec_flags = M_NOTIFICATION;
3312 	/* not that we need this */
3313 	control->tail_mbuf = m_notify;
3314 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3315 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3316 }
3317 
3318 
3319 void
3320 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3321 {
3322 	struct mbuf *m_notify;
3323 	struct sctp_queued_to_read *control;
3324 	struct sctp_stream_change_event *stradd;
3325 
3326 	if ((stcb == NULL) ||
3327 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3328 		/* event not enabled */
3329 		return;
3330 	}
3331 	if ((stcb->asoc.peer_req_out) && flag) {
3332 		/* Peer made the request, don't tell the local user */
3333 		stcb->asoc.peer_req_out = 0;
3334 		return;
3335 	}
3336 	stcb->asoc.peer_req_out = 0;
3337 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3338 	if (m_notify == NULL)
3339 		/* no space left */
3340 		return;
3341 	SCTP_BUF_LEN(m_notify) = 0;
3342 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3343 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3344 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3345 	stradd->strchange_flags = flag;
3346 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3347 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3348 	stradd->strchange_instrms = numberin;
3349 	stradd->strchange_outstrms = numberout;
3350 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3351 	SCTP_BUF_NEXT(m_notify) = NULL;
3352 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3353 		/* no space */
3354 		sctp_m_freem(m_notify);
3355 		return;
3356 	}
3357 	/* append to socket */
3358 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3359 	    0, 0, stcb->asoc.context, 0, 0, 0,
3360 	    m_notify);
3361 	if (control == NULL) {
3362 		/* no memory */
3363 		sctp_m_freem(m_notify);
3364 		return;
3365 	}
3366 	control->spec_flags = M_NOTIFICATION;
3367 	control->length = SCTP_BUF_LEN(m_notify);
3368 	/* not that we need this */
3369 	control->tail_mbuf = m_notify;
3370 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3371 	    control,
3372 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3373 }
3374 
3375 void
3376 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3377 {
3378 	struct mbuf *m_notify;
3379 	struct sctp_queued_to_read *control;
3380 	struct sctp_assoc_reset_event *strasoc;
3381 
3382 	if ((stcb == NULL) ||
3383 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3384 		/* event not enabled */
3385 		return;
3386 	}
3387 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3388 	if (m_notify == NULL)
3389 		/* no space left */
3390 		return;
3391 	SCTP_BUF_LEN(m_notify) = 0;
3392 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3393 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3394 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3395 	strasoc->assocreset_flags = flag;
3396 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3397 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3398 	strasoc->assocreset_local_tsn = sending_tsn;
3399 	strasoc->assocreset_remote_tsn = recv_tsn;
3400 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3401 	SCTP_BUF_NEXT(m_notify) = NULL;
3402 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3403 		/* no space */
3404 		sctp_m_freem(m_notify);
3405 		return;
3406 	}
3407 	/* append to socket */
3408 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3409 	    0, 0, stcb->asoc.context, 0, 0, 0,
3410 	    m_notify);
3411 	if (control == NULL) {
3412 		/* no memory */
3413 		sctp_m_freem(m_notify);
3414 		return;
3415 	}
3416 	control->spec_flags = M_NOTIFICATION;
3417 	control->length = SCTP_BUF_LEN(m_notify);
3418 	/* not that we need this */
3419 	control->tail_mbuf = m_notify;
3420 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3421 	    control,
3422 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3423 }
3424 
3425 
3426 
3427 static void
3428 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3429     int number_entries, uint16_t * list, int flag)
3430 {
3431 	struct mbuf *m_notify;
3432 	struct sctp_queued_to_read *control;
3433 	struct sctp_stream_reset_event *strreset;
3434 	int len;
3435 
3436 	if ((stcb == NULL) ||
3437 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3438 		/* event not enabled */
3439 		return;
3440 	}
3441 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3442 	if (m_notify == NULL)
3443 		/* no space left */
3444 		return;
3445 	SCTP_BUF_LEN(m_notify) = 0;
3446 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3447 	if (len > M_TRAILINGSPACE(m_notify)) {
3448 		/* never enough room */
3449 		sctp_m_freem(m_notify);
3450 		return;
3451 	}
3452 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3453 	memset(strreset, 0, len);
3454 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3455 	strreset->strreset_flags = flag;
3456 	strreset->strreset_length = len;
3457 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3458 	if (number_entries) {
3459 		int i;
3460 
3461 		for (i = 0; i < number_entries; i++) {
3462 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3463 		}
3464 	}
3465 	SCTP_BUF_LEN(m_notify) = len;
3466 	SCTP_BUF_NEXT(m_notify) = NULL;
3467 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3468 		/* no space */
3469 		sctp_m_freem(m_notify);
3470 		return;
3471 	}
3472 	/* append to socket */
3473 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3474 	    0, 0, stcb->asoc.context, 0, 0, 0,
3475 	    m_notify);
3476 	if (control == NULL) {
3477 		/* no memory */
3478 		sctp_m_freem(m_notify);
3479 		return;
3480 	}
3481 	control->spec_flags = M_NOTIFICATION;
3482 	control->length = SCTP_BUF_LEN(m_notify);
3483 	/* not that we need this */
3484 	control->tail_mbuf = m_notify;
3485 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3486 	    control,
3487 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3488 }
3489 
3490 
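/*
 * Queue an SCTP_REMOTE_ERROR notification for an ERROR chunk received from
 * the peer; the chunk itself is appended when a large enough mbuf could be
 * allocated.
 */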
3491 static void
3492 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3493 {
3494 	struct mbuf *m_notify;
3495 	struct sctp_remote_error *sre;
3496 	struct sctp_queued_to_read *control;
3497 	size_t notif_len, chunk_len;
3498 
3499 	if ((stcb == NULL) ||
3500 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3501 		return;
3502 	}
3503 	if (chunk != NULL) {
3504 		chunk_len = ntohs(chunk->ch.chunk_length);
3505 	} else {
3506 		chunk_len = 0;
3507 	}
3508 	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
3509 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3510 	if (m_notify == NULL) {
3511 		/* Retry with smaller value. */
3512 		notif_len = sizeof(struct sctp_remote_error);
3513 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3514 		if (m_notify == NULL) {
3515 			return;
3516 		}
3517 	}
3518 	SCTP_BUF_NEXT(m_notify) = NULL;
3519 	sre = mtod(m_notify, struct sctp_remote_error *);
3520 	memset(sre, 0, notif_len);
3521 	sre->sre_type = SCTP_REMOTE_ERROR;
3522 	sre->sre_flags = 0;
3523 	sre->sre_length = sizeof(struct sctp_remote_error);
3524 	sre->sre_error = error;
3525 	sre->sre_assoc_id = sctp_get_associd(stcb);
3526 	if (notif_len > sizeof(struct sctp_remote_error)) {
3527 		memcpy(sre->sre_data, chunk, chunk_len);
3528 		sre->sre_length += chunk_len;
3529 	}
3530 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3531 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3532 	    0, 0, stcb->asoc.context, 0, 0, 0,
3533 	    m_notify);
3534 	if (control != NULL) {
3535 		control->length = SCTP_BUF_LEN(m_notify);
3536 		/* not that we need this */
3537 		control->tail_mbuf = m_notify;
3538 		control->spec_flags = M_NOTIFICATION;
3539 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3540 		    control,
3541 		    &stcb->sctp_socket->so_rcv, 1,
3542 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3543 	} else {
3544 		sctp_m_freem(m_notify);
3545 	}
3546 }
3547 
3548 
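/*
 * Central dispatcher for upper layer notifications: filters out events that
 * cannot or should not be delivered (socket gone, receive side shut down,
 * interface events while still in front states) and hands the rest to the
 * matching sctp_notify_*() helper.
 */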
3549 void
3550 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3551     uint32_t error, void *data, int so_locked
3552 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3553     SCTP_UNUSED
3554 #endif
3555 )
3556 {
3557 	if ((stcb == NULL) ||
3558 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3559 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3560 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3561 		/* If the socket is gone we are out of here */
3562 		return;
3563 	}
3564 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3565 		return;
3566 	}
3567 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3568 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3569 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3570 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3571 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3572 			/* Don't report these in front states */
3573 			return;
3574 		}
3575 	}
3576 	switch (notification) {
3577 	case SCTP_NOTIFY_ASSOC_UP:
3578 		if (stcb->asoc.assoc_up_sent == 0) {
3579 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3580 			stcb->asoc.assoc_up_sent = 1;
3581 		}
3582 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3583 			sctp_notify_adaptation_layer(stcb);
3584 		}
3585 		if (stcb->asoc.auth_supported == 0) {
3586 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3587 			    NULL, so_locked);
3588 		}
3589 		break;
3590 	case SCTP_NOTIFY_ASSOC_DOWN:
3591 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3592 		break;
3593 	case SCTP_NOTIFY_INTERFACE_DOWN:
3594 		{
3595 			struct sctp_nets *net;
3596 
3597 			net = (struct sctp_nets *)data;
3598 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3599 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3600 			break;
3601 		}
3602 	case SCTP_NOTIFY_INTERFACE_UP:
3603 		{
3604 			struct sctp_nets *net;
3605 
3606 			net = (struct sctp_nets *)data;
3607 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3608 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3609 			break;
3610 		}
3611 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3612 		{
3613 			struct sctp_nets *net;
3614 
3615 			net = (struct sctp_nets *)data;
3616 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3617 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3618 			break;
3619 		}
3620 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3621 		sctp_notify_send_failed2(stcb, error,
3622 		    (struct sctp_stream_queue_pending *)data, so_locked);
3623 		break;
3624 	case SCTP_NOTIFY_SENT_DG_FAIL:
3625 		sctp_notify_send_failed(stcb, 1, error,
3626 		    (struct sctp_tmit_chunk *)data, so_locked);
3627 		break;
3628 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3629 		sctp_notify_send_failed(stcb, 0, error,
3630 		    (struct sctp_tmit_chunk *)data, so_locked);
3631 		break;
3632 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3633 		{
3634 			uint32_t val;
3635 
3636 			val = *((uint32_t *) data);
3637 
3638 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3639 			break;
3640 		}
3641 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3642 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3643 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3644 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3645 		} else {
3646 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3647 		}
3648 		break;
3649 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3650 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3651 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3652 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3653 		} else {
3654 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3655 		}
3656 		break;
3657 	case SCTP_NOTIFY_ASSOC_RESTART:
3658 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3659 		if (stcb->asoc.auth_supported == 0) {
3660 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3661 			    NULL, so_locked);
3662 		}
3663 		break;
3664 	case SCTP_NOTIFY_STR_RESET_SEND:
3665 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3666 		break;
3667 	case SCTP_NOTIFY_STR_RESET_RECV:
3668 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3669 		break;
3670 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3671 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3672 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3673 		break;
3674 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3675 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3676 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3677 		break;
3678 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3679 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3680 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3681 		break;
3682 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3683 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3684 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3685 		break;
3686 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3687 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3688 		    error, so_locked);
3689 		break;
3690 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3691 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3692 		    error, so_locked);
3693 		break;
3694 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3695 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3696 		    error, so_locked);
3697 		break;
3698 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3699 		sctp_notify_shutdown_event(stcb);
3700 		break;
3701 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3702 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3703 		    (uint16_t) (uintptr_t) data,
3704 		    so_locked);
3705 		break;
3706 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3707 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3708 		    (uint16_t) (uintptr_t) data,
3709 		    so_locked);
3710 		break;
3711 	case SCTP_NOTIFY_NO_PEER_AUTH:
3712 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3713 		    (uint16_t) (uintptr_t) data,
3714 		    so_locked);
3715 		break;
3716 	case SCTP_NOTIFY_SENDER_DRY:
3717 		sctp_notify_sender_dry_event(stcb, so_locked);
3718 		break;
3719 	case SCTP_NOTIFY_REMOTE_ERROR:
3720 		sctp_notify_remote_error(stcb, error, data);
3721 		break;
3722 	default:
3723 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3724 		    __FUNCTION__, notification, notification);
3725 		break;
3726 	}			/* end switch */
3727 }
3728 
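/*
 * Report every chunk still queued for transmission as failed: drain the
 * sent queue, the send queue, and each stream's output queue, issuing the
 * appropriate SENT/UNSENT/SP failure notifications for chunks that still
 * carry data.
 */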
3729 void
3730 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3731 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3732     SCTP_UNUSED
3733 #endif
3734 )
3735 {
3736 	struct sctp_association *asoc;
3737 	struct sctp_stream_out *outs;
3738 	struct sctp_tmit_chunk *chk, *nchk;
3739 	struct sctp_stream_queue_pending *sp, *nsp;
3740 	int i;
3741 
3742 	if (stcb == NULL) {
3743 		return;
3744 	}
3745 	asoc = &stcb->asoc;
3746 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3747 		/* already being freed */
3748 		return;
3749 	}
3750 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3751 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3752 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3753 		return;
3754 	}
3755 	/* now go through all the gunk, freeing chunks */
3756 	if (holds_lock == 0) {
3757 		SCTP_TCB_SEND_LOCK(stcb);
3758 	}
3759 	/* sent queue SHOULD be empty */
3760 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3761 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3762 		asoc->sent_queue_cnt--;
3763 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3764 			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3765 				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3766 #ifdef INVARIANTS
3767 			} else {
3768 				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3769 #endif
3770 			}
3771 		}
3772 		if (chk->data != NULL) {
3773 			sctp_free_bufspace(stcb, asoc, chk, 1);
3774 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3775 			    error, chk, so_locked);
3776 			if (chk->data) {
3777 				sctp_m_freem(chk->data);
3778 				chk->data = NULL;
3779 			}
3780 		}
3781 		sctp_free_a_chunk(stcb, chk, so_locked);
3782 		/* sa_ignore FREED_MEMORY */
3783 	}
3784 	/* pending send queue SHOULD be empty */
3785 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3786 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3787 		asoc->send_queue_cnt--;
3788 		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3789 			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3790 #ifdef INVARIANTS
3791 		} else {
3792 			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3793 #endif
3794 		}
3795 		if (chk->data != NULL) {
3796 			sctp_free_bufspace(stcb, asoc, chk, 1);
3797 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3798 			    error, chk, so_locked);
3799 			if (chk->data) {
3800 				sctp_m_freem(chk->data);
3801 				chk->data = NULL;
3802 			}
3803 		}
3804 		sctp_free_a_chunk(stcb, chk, so_locked);
3805 		/* sa_ignore FREED_MEMORY */
3806 	}
3807 	for (i = 0; i < asoc->streamoutcnt; i++) {
3808 		/* For each stream */
3809 		outs = &asoc->strmout[i];
3810 		/* clean up any sends there */
3811 		asoc->locked_on_sending = NULL;
3812 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3813 			asoc->stream_queue_cnt--;
3814 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3815 			sctp_free_spbufspace(stcb, asoc, sp);
3816 			if (sp->data) {
3817 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3818 				    error, (void *)sp, so_locked);
3819 				if (sp->data) {
3820 					sctp_m_freem(sp->data);
3821 					sp->data = NULL;
3822 					sp->tail_mbuf = NULL;
3823 					sp->length = 0;
3824 				}
3825 			}
3826 			if (sp->net) {
3827 				sctp_free_remote_addr(sp->net);
3828 				sp->net = NULL;
3829 			}
3830 			/* Free the chunk */
3831 			sctp_free_a_strmoq(stcb, sp, so_locked);
3832 			/* sa_ignore FREED_MEMORY */
3833 		}
3834 	}
3835 
3836 	if (holds_lock == 0) {
3837 		SCTP_TCB_SEND_UNLOCK(stcb);
3838 	}
3839 }
3840 
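/*
 * Tell the ULP that the association was aborted: report all outbound data
 * as failed and deliver a remote or local abort notification, unless the
 * socket is already gone.
 */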
3841 void
3842 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3843     struct sctp_abort_chunk *abort, int so_locked
3844 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3845     SCTP_UNUSED
3846 #endif
3847 )
3848 {
3849 	if (stcb == NULL) {
3850 		return;
3851 	}
3852 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3853 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3854 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3855 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3856 	}
3857 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3858 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3859 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3860 		return;
3861 	}
3862 	/* Tell them we lost the asoc */
3863 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3864 	if (from_peer) {
3865 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3866 	} else {
3867 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3868 	}
3869 }
3870 
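/*
 * Abort an association in response to an incoming packet: notify the ULP
 * (if we have a TCB), send an ABORT to the peer, and free the association.
 */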
3871 void
3872 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3873     struct mbuf *m, int iphlen,
3874     struct sockaddr *src, struct sockaddr *dst,
3875     struct sctphdr *sh, struct mbuf *op_err,
3876     uint8_t mflowtype, uint32_t mflowid,
3877     uint32_t vrf_id, uint16_t port)
3878 {
3879 	uint32_t vtag;
3880 
3881 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3882 	struct socket *so;
3883 
3884 #endif
3885 
3886 	vtag = 0;
3887 	if (stcb != NULL) {
3888 		/* We have a TCB to abort, send notification too */
3889 		vtag = stcb->asoc.peer_vtag;
3890 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
3891 		/* get the assoc vrf id and table id */
3892 		vrf_id = stcb->asoc.vrf_id;
3893 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3894 	}
3895 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
3896 	    mflowtype, mflowid,
3897 	    vrf_id, port);
3898 	if (stcb != NULL) {
3899 		/* Ok, now let's free it */
3900 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3901 		so = SCTP_INP_SO(inp);
3902 		atomic_add_int(&stcb->asoc.refcnt, 1);
3903 		SCTP_TCB_UNLOCK(stcb);
3904 		SCTP_SOCKET_LOCK(so, 1);
3905 		SCTP_TCB_LOCK(stcb);
3906 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3907 #endif
3908 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3909 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3910 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3911 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3912 		}
3913 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
3914 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3915 		SCTP_SOCKET_UNLOCK(so, 1);
3916 #endif
3917 	}
3918 }
3919 
3920 #ifdef SCTP_ASOCLOG_OF_TSNS
3921 void
3922 sctp_print_out_track_log(struct sctp_tcb *stcb)
3923 {
3924 #ifdef NOISY_PRINTS
3925 	int i;
3926 
3927 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
3928 	SCTP_PRINTF("IN bound TSN log-aaa\n");
3929 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
3930 		SCTP_PRINTF("None rcvd\n");
3931 		goto none_in;
3932 	}
3933 	if (stcb->asoc.tsn_in_wrapped) {
3934 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
3935 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3936 			    stcb->asoc.in_tsnlog[i].tsn,
3937 			    stcb->asoc.in_tsnlog[i].strm,
3938 			    stcb->asoc.in_tsnlog[i].seq,
3939 			    stcb->asoc.in_tsnlog[i].flgs,
3940 			    stcb->asoc.in_tsnlog[i].sz);
3941 		}
3942 	}
3943 	if (stcb->asoc.tsn_in_at) {
3944 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
3945 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3946 			    stcb->asoc.in_tsnlog[i].tsn,
3947 			    stcb->asoc.in_tsnlog[i].strm,
3948 			    stcb->asoc.in_tsnlog[i].seq,
3949 			    stcb->asoc.in_tsnlog[i].flgs,
3950 			    stcb->asoc.in_tsnlog[i].sz);
3951 		}
3952 	}
3953 none_in:
3954 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
3955 	if ((stcb->asoc.tsn_out_at == 0) &&
3956 	    (stcb->asoc.tsn_out_wrapped == 0)) {
3957 		SCTP_PRINTF("None sent\n");
3958 	}
3959 	if (stcb->asoc.tsn_out_wrapped) {
3960 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
3961 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3962 			    stcb->asoc.out_tsnlog[i].tsn,
3963 			    stcb->asoc.out_tsnlog[i].strm,
3964 			    stcb->asoc.out_tsnlog[i].seq,
3965 			    stcb->asoc.out_tsnlog[i].flgs,
3966 			    stcb->asoc.out_tsnlog[i].sz);
3967 		}
3968 	}
3969 	if (stcb->asoc.tsn_out_at) {
3970 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
3971 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3972 			    stcb->asoc.out_tsnlog[i].tsn,
3973 			    stcb->asoc.out_tsnlog[i].strm,
3974 			    stcb->asoc.out_tsnlog[i].seq,
3975 			    stcb->asoc.out_tsnlog[i].flgs,
3976 			    stcb->asoc.out_tsnlog[i].sz);
3977 		}
3978 	}
3979 #endif
3980 }
3981 
3982 #endif
3983 
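/*
 * Abort an existing association from our side: notify the ULP, send an
 * ABORT chunk to the peer, and free the association. Without a TCB, only
 * clean up the endpoint if it is already marked as gone.
 */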
3984 void
3985 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3986     struct mbuf *op_err,
3987     int so_locked
3988 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3989     SCTP_UNUSED
3990 #endif
3991 )
3992 {
3993 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3994 	struct socket *so;
3995 
3996 #endif
3997 
3998 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3999 	so = SCTP_INP_SO(inp);
4000 #endif
4001 	if (stcb == NULL) {
4002 		/* Got to have a TCB */
4003 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4004 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4005 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4006 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4007 			}
4008 		}
4009 		return;
4010 	} else {
4011 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4012 	}
4013 	/* notify the ulp */
4014 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4015 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4016 	}
4017 	/* notify the peer */
4018 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4019 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4020 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4021 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4022 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4023 	}
4024 	/* now free the asoc */
4025 #ifdef SCTP_ASOCLOG_OF_TSNS
4026 	sctp_print_out_track_log(stcb);
4027 #endif
4028 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4029 	if (!so_locked) {
4030 		atomic_add_int(&stcb->asoc.refcnt, 1);
4031 		SCTP_TCB_UNLOCK(stcb);
4032 		SCTP_SOCKET_LOCK(so, 1);
4033 		SCTP_TCB_LOCK(stcb);
4034 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4035 	}
4036 #endif
4037 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4038 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4039 	if (!so_locked) {
4040 		SCTP_SOCKET_UNLOCK(so, 1);
4041 	}
4042 #endif
4043 }
4044 
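/*
 * Handle an out-of-the-blue packet, i.e. one that matches no existing
 * association. PACKET-DROPPED, ABORT, and SHUTDOWN-COMPLETE chunks are
 * ignored, a SHUTDOWN-ACK is answered with a SHUTDOWN-COMPLETE, and
 * everything else draws an ABORT unless the sctp_blackhole sysctl
 * suppresses it.
 */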
4045 void
4046 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4047     struct sockaddr *src, struct sockaddr *dst,
4048     struct sctphdr *sh, struct sctp_inpcb *inp,
4049     struct mbuf *cause,
4050     uint8_t mflowtype, uint32_t mflowid,
4051     uint32_t vrf_id, uint16_t port)
4052 {
4053 	struct sctp_chunkhdr *ch, chunk_buf;
4054 	unsigned int chk_length;
4055 	int contains_init_chunk;
4056 
4057 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4058 	/* Generate a TO address for future reference */
4059 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4060 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4061 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4062 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4063 		}
4064 	}
4065 	contains_init_chunk = 0;
4066 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4067 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4068 	while (ch != NULL) {
4069 		chk_length = ntohs(ch->chunk_length);
4070 		if (chk_length < sizeof(*ch)) {
4071 			/* break to abort land */
4072 			break;
4073 		}
4074 		switch (ch->chunk_type) {
4075 		case SCTP_INIT:
4076 			contains_init_chunk = 1;
4077 			break;
4078 		case SCTP_PACKET_DROPPED:
4079 			/* we don't respond to pkt-dropped */
4080 			return;
4081 		case SCTP_ABORT_ASSOCIATION:
4082 			/* we don't respond with an ABORT to an ABORT */
4083 			return;
4084 		case SCTP_SHUTDOWN_COMPLETE:
4085 			/*
4086 			 * we ignore it since we are not waiting for it and
4087 			 * peer is gone
4088 			 */
4089 			return;
4090 		case SCTP_SHUTDOWN_ACK:
4091 			sctp_send_shutdown_complete2(src, dst, sh,
4092 			    mflowtype, mflowid,
4093 			    vrf_id, port);
4094 			return;
4095 		default:
4096 			break;
4097 		}
4098 		offset += SCTP_SIZE32(chk_length);
4099 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4100 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4101 	}
4102 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4103 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4104 	    (contains_init_chunk == 0))) {
4105 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4106 		    mflowtype, mflowid,
4107 		    vrf_id, port);
4108 	}
4109 }
4110 
4111 /*
4112  * check the inbound datagram to make sure there is not an abort inside it,
4113  * if there is return 1, else return 0.
4114  */
4115 int
4116 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4117 {
4118 	struct sctp_chunkhdr *ch;
4119 	struct sctp_init_chunk *init_chk, chunk_buf;
4120 	int offset;
4121 	unsigned int chk_length;
4122 
4123 	offset = iphlen + sizeof(struct sctphdr);
4124 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4125 	    (uint8_t *) & chunk_buf);
4126 	while (ch != NULL) {
4127 		chk_length = ntohs(ch->chunk_length);
4128 		if (chk_length < sizeof(*ch)) {
4129 			/* packet is probably corrupt */
4130 			break;
4131 		}
4132 		/* we seem to be ok, is it an abort? */
4133 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4134 			/* yep, tell them */
4135 			return (1);
4136 		}
4137 		if (ch->chunk_type == SCTP_INITIATION) {
4138 			/* need to update the Vtag */
4139 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4140 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4141 			if (init_chk != NULL) {
4142 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4143 			}
4144 		}
4145 		/* Nope, move to the next chunk */
4146 		offset += SCTP_SIZE32(chk_length);
4147 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4148 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4149 	}
4150 	return (0);
4151 }
4152 
4153 /*
4154  * currently (2/02), ifa_addr embeds scope_ids and doesn't have sin6_scope_id
4155  * set (i.e. it's 0), so create this function to compare link-local scopes
4156  */
4157 #ifdef INET6
4158 uint32_t
4159 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4160 {
4161 	struct sockaddr_in6 a, b;
4162 
4163 	/* save copies */
4164 	a = *addr1;
4165 	b = *addr2;
4166 
4167 	if (a.sin6_scope_id == 0)
4168 		if (sa6_recoverscope(&a)) {
4169 			/* can't get scope, so can't match */
4170 			return (0);
4171 		}
4172 	if (b.sin6_scope_id == 0)
4173 		if (sa6_recoverscope(&b)) {
4174 			/* can't get scope, so can't match */
4175 			return (0);
4176 		}
4177 	if (a.sin6_scope_id != b.sin6_scope_id)
4178 		return (0);
4179 
4180 	return (1);
4181 }
4182 
4183 /*
4184  * returns a sockaddr_in6 with embedded scope recovered and removed
4185  */
4186 struct sockaddr_in6 *
4187 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4188 {
4189 	/* check and strip embedded scope junk */
4190 	if (addr->sin6_family == AF_INET6) {
4191 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4192 			if (addr->sin6_scope_id == 0) {
4193 				*store = *addr;
4194 				if (!sa6_recoverscope(store)) {
4195 					/* use the recovered scope */
4196 					addr = store;
4197 				}
4198 			} else {
4199 				/* else, return the original "to" addr */
4200 				in6_clearscope(&addr->sin6_addr);
4201 			}
4202 		}
4203 	}
4204 	return (addr);
4205 }
4206 
4207 #endif
4208 
4209 /*
4210  * are the two addresses the same?  currently a "scopeless" check returns: 1
4211  * if same, 0 if not
4212  */
4213 int
4214 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4215 {
4216 
4217 	/* must be valid */
4218 	if (sa1 == NULL || sa2 == NULL)
4219 		return (0);
4220 
4221 	/* must be the same family */
4222 	if (sa1->sa_family != sa2->sa_family)
4223 		return (0);
4224 
4225 	switch (sa1->sa_family) {
4226 #ifdef INET6
4227 	case AF_INET6:
4228 		{
4229 			/* IPv6 addresses */
4230 			struct sockaddr_in6 *sin6_1, *sin6_2;
4231 
4232 			sin6_1 = (struct sockaddr_in6 *)sa1;
4233 			sin6_2 = (struct sockaddr_in6 *)sa2;
4234 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4235 			    sin6_2));
4236 		}
4237 #endif
4238 #ifdef INET
4239 	case AF_INET:
4240 		{
4241 			/* IPv4 addresses */
4242 			struct sockaddr_in *sin_1, *sin_2;
4243 
4244 			sin_1 = (struct sockaddr_in *)sa1;
4245 			sin_2 = (struct sockaddr_in *)sa2;
4246 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4247 		}
4248 #endif
4249 	default:
4250 		/* we don't do these... */
4251 		return (0);
4252 	}
4253 }
4254 
4255 void
4256 sctp_print_address(struct sockaddr *sa)
4257 {
4258 #ifdef INET6
4259 	char ip6buf[INET6_ADDRSTRLEN];
4260 
4261 #endif
4262 
4263 	switch (sa->sa_family) {
4264 #ifdef INET6
4265 	case AF_INET6:
4266 		{
4267 			struct sockaddr_in6 *sin6;
4268 
4269 			sin6 = (struct sockaddr_in6 *)sa;
4270 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4271 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4272 			    ntohs(sin6->sin6_port),
4273 			    sin6->sin6_scope_id);
4274 			break;
4275 		}
4276 #endif
4277 #ifdef INET
4278 	case AF_INET:
4279 		{
4280 			struct sockaddr_in *sin;
4281 			unsigned char *p;
4282 
4283 			sin = (struct sockaddr_in *)sa;
4284 			p = (unsigned char *)&sin->sin_addr;
4285 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4286 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4287 			break;
4288 		}
4289 #endif
4290 	default:
4291 		SCTP_PRINTF("?\n");
4292 		break;
4293 	}
4294 }
4295 
4296 void
4297 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4298     struct sctp_inpcb *new_inp,
4299     struct sctp_tcb *stcb,
4300     int waitflags)
4301 {
4302 	/*
4303 	 * go through our old INP and pull off any control structures that
4304 	 * belong to stcb and move them to the new inp.
4305 	 */
4306 	struct socket *old_so, *new_so;
4307 	struct sctp_queued_to_read *control, *nctl;
4308 	struct sctp_readhead tmp_queue;
4309 	struct mbuf *m;
4310 	int error = 0;
4311 
4312 	old_so = old_inp->sctp_socket;
4313 	new_so = new_inp->sctp_socket;
4314 	TAILQ_INIT(&tmp_queue);
4315 	error = sblock(&old_so->so_rcv, waitflags);
4316 	if (error) {
4317 		/*
4318 		 * Gak, can't get sblock, we have a problem. Data will be
4319 		 * left stranded.. and we don't dare look at it since the
4320 		 * other thread may be reading something. Oh well, it's a
4321 		 * screwed up app that does a peeloff OR an accept while
4322 		 * reading from the main socket... actually it's only the
4323 		 * peeloff() case, since I think read will fail on a
4324 		 * listening socket..
4325 		 */
4326 		return;
4327 	}
4328 	/* lock the socket buffers */
4329 	SCTP_INP_READ_LOCK(old_inp);
4330 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4331 		/* Pull off all for our target stcb */
4332 		if (control->stcb == stcb) {
4333 			/* remove it, we want it */
4334 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4335 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4336 			m = control->data;
4337 			while (m) {
4338 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4339 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4340 				}
4341 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4342 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4343 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4344 				}
4345 				m = SCTP_BUF_NEXT(m);
4346 			}
4347 		}
4348 	}
4349 	SCTP_INP_READ_UNLOCK(old_inp);
4350 	/* Remove the sb-lock on the old socket */
4351 
4352 	sbunlock(&old_so->so_rcv);
4353 	/* Now we move them over to the new socket buffer */
4354 	SCTP_INP_READ_LOCK(new_inp);
4355 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4356 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4357 		m = control->data;
4358 		while (m) {
4359 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4360 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4361 			}
4362 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4363 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4364 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4365 			}
4366 			m = SCTP_BUF_NEXT(m);
4367 		}
4368 	}
4369 	SCTP_INP_READ_UNLOCK(new_inp);
4370 }
4371 
4372 void
4373 sctp_add_to_readq(struct sctp_inpcb *inp,
4374     struct sctp_tcb *stcb,
4375     struct sctp_queued_to_read *control,
4376     struct sockbuf *sb,
4377     int end,
4378     int inp_read_lock_held,
4379     int so_locked
4380 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4381     SCTP_UNUSED
4382 #endif
4383 )
4384 {
4385 	/*
4386 	 * Here we must place the control on the end of the socket read
4387 	 * queue AND increment sb_cc so that select will work properly on
4388 	 * read.
4389 	 */
4390 	struct mbuf *m, *prev = NULL;
4391 
4392 	if (inp == NULL) {
4393 		/* Gak, TSNH!! */
4394 #ifdef INVARIANTS
4395 		panic("Gak, inp NULL on add_to_readq");
4396 #endif
4397 		return;
4398 	}
4399 	if (inp_read_lock_held == 0)
4400 		SCTP_INP_READ_LOCK(inp);
4401 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4402 		sctp_free_remote_addr(control->whoFrom);
4403 		if (control->data) {
4404 			sctp_m_freem(control->data);
4405 			control->data = NULL;
4406 		}
4407 		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
4408 		if (inp_read_lock_held == 0)
4409 			SCTP_INP_READ_UNLOCK(inp);
4410 		return;
4411 	}
4412 	if (!(control->spec_flags & M_NOTIFICATION)) {
4413 		atomic_add_int(&inp->total_recvs, 1);
4414 		if (!control->do_not_ref_stcb) {
4415 			atomic_add_int(&stcb->total_recvs, 1);
4416 		}
4417 	}
4418 	m = control->data;
4419 	control->held_length = 0;
4420 	control->length = 0;
4421 	while (m) {
4422 		if (SCTP_BUF_LEN(m) == 0) {
4423 			/* Skip mbufs with NO length */
4424 			if (prev == NULL) {
4425 				/* First one */
4426 				control->data = sctp_m_free(m);
4427 				m = control->data;
4428 			} else {
4429 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4430 				m = SCTP_BUF_NEXT(prev);
4431 			}
4432 			if (m == NULL) {
4433 				control->tail_mbuf = prev;
4434 			}
4435 			continue;
4436 		}
4437 		prev = m;
4438 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4439 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4440 		}
4441 		sctp_sballoc(stcb, sb, m);
4442 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4443 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4444 		}
4445 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4446 		m = SCTP_BUF_NEXT(m);
4447 	}
4448 	if (prev != NULL) {
4449 		control->tail_mbuf = prev;
4450 	} else {
4451 		/* Everything got collapsed out?? */
4452 		sctp_free_remote_addr(control->whoFrom);
4453 		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
4454 		if (inp_read_lock_held == 0)
4455 			SCTP_INP_READ_UNLOCK(inp);
4456 		return;
4457 	}
4458 	if (end) {
4459 		control->end_added = 1;
4460 	}
4461 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4462 	if (inp_read_lock_held == 0)
4463 		SCTP_INP_READ_UNLOCK(inp);
4464 	if (inp && inp->sctp_socket) {
4465 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4466 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4467 		} else {
4468 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4469 			struct socket *so;
4470 
4471 			so = SCTP_INP_SO(inp);
4472 			if (!so_locked) {
4473 				if (stcb) {
4474 					atomic_add_int(&stcb->asoc.refcnt, 1);
4475 					SCTP_TCB_UNLOCK(stcb);
4476 				}
4477 				SCTP_SOCKET_LOCK(so, 1);
4478 				if (stcb) {
4479 					SCTP_TCB_LOCK(stcb);
4480 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4481 				}
4482 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4483 					SCTP_SOCKET_UNLOCK(so, 1);
4484 					return;
4485 				}
4486 			}
4487 #endif
4488 			sctp_sorwakeup(inp, inp->sctp_socket);
4489 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4490 			if (!so_locked) {
4491 				SCTP_SOCKET_UNLOCK(so, 1);
4492 			}
4493 #endif
4494 		}
4495 	}
4496 }
4497 
4498 
4499 int
4500 sctp_append_to_readq(struct sctp_inpcb *inp,
4501     struct sctp_tcb *stcb,
4502     struct sctp_queued_to_read *control,
4503     struct mbuf *m,
4504     int end,
4505     int ctls_cumack,
4506     struct sockbuf *sb)
4507 {
4508 	/*
4509 	 * A partial delivery API event is underway. OR we are appending on
4510 	 * the reassembly queue.
4511 	 *
4512 	 * If PDAPI this means we need to add m to the end of the data.
4513 	 * Increase the length in the control AND increment the sb_cc.
4514 	 * Otherwise sb is NULL and all we need to do is put it at the end
4515 	 * of the mbuf chain.
4516 	 */
4517 	int len = 0;
4518 	struct mbuf *mm, *tail = NULL, *prev = NULL;
4519 
4520 	if (inp) {
4521 		SCTP_INP_READ_LOCK(inp);
4522 	}
4523 	if (control == NULL) {
4524 get_out:
4525 		if (inp) {
4526 			SCTP_INP_READ_UNLOCK(inp);
4527 		}
4528 		return (-1);
4529 	}
4530 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
4531 		SCTP_INP_READ_UNLOCK(inp);
4532 		return (0);
4533 	}
4534 	if (control->end_added) {
4535 		/* huh this one is complete? */
4536 		goto get_out;
4537 	}
4538 	mm = m;
4539 	if (mm == NULL) {
4540 		goto get_out;
4541 	}
4542 	while (mm) {
4543 		if (SCTP_BUF_LEN(mm) == 0) {
4544 			/* Skip mbufs with NO length */
4545 			if (prev == NULL) {
4546 				/* First one */
4547 				m = sctp_m_free(mm);
4548 				mm = m;
4549 			} else {
4550 				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
4551 				mm = SCTP_BUF_NEXT(prev);
4552 			}
4553 			continue;
4554 		}
4555 		prev = mm;
4556 		len += SCTP_BUF_LEN(mm);
4557 		if (sb) {
4558 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4559 				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
4560 			}
4561 			sctp_sballoc(stcb, sb, mm);
4562 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4563 				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4564 			}
4565 		}
4566 		mm = SCTP_BUF_NEXT(mm);
4567 	}
4568 	if (prev) {
4569 		tail = prev;
4570 	} else {
4571 		/* Really there should always be a prev */
4572 		if (m == NULL) {
4573 			/* Huh nothing left? */
4574 #ifdef INVARIANTS
4575 			panic("Nothing left to add?");
4576 #else
4577 			goto get_out;
4578 #endif
4579 		}
4580 		tail = m;
4581 	}
4582 	if (control->tail_mbuf) {
4583 		/* append */
4584 		SCTP_BUF_NEXT(control->tail_mbuf) = m;
4585 		control->tail_mbuf = tail;
4586 	} else {
4587 		/* nothing there */
4588 #ifdef INVARIANTS
4589 		if (control->data != NULL) {
4590 			panic("This should NOT happen");
4591 		}
4592 #endif
4593 		control->data = m;
4594 		control->tail_mbuf = tail;
4595 	}
4596 	atomic_add_int(&control->length, len);
4597 	if (end) {
4598 		/* message is complete */
4599 		if (stcb && (control == stcb->asoc.control_pdapi)) {
4600 			stcb->asoc.control_pdapi = NULL;
4601 		}
4602 		control->held_length = 0;
4603 		control->end_added = 1;
4604 	}
4605 	if (stcb == NULL) {
4606 		control->do_not_ref_stcb = 1;
4607 	}
4608 	/*
4609 	 * When we are appending in partial delivery, the cum-ack is used
4610 	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
4611 	 * is populated in the outbound sinfo structure from the true cumack
4612 	 * if the association exists...
4613 	 */
4614 	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
4615 	if (inp) {
4616 		SCTP_INP_READ_UNLOCK(inp);
4617 	}
4618 	if (inp && inp->sctp_socket) {
4619 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4620 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4621 		} else {
4622 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4623 			struct socket *so;
4624 
4625 			so = SCTP_INP_SO(inp);
4626 			if (stcb) {
4627 				atomic_add_int(&stcb->asoc.refcnt, 1);
4628 				SCTP_TCB_UNLOCK(stcb);
4629 			}
4630 			SCTP_SOCKET_LOCK(so, 1);
4631 			if (stcb) {
4632 				SCTP_TCB_LOCK(stcb);
4633 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4634 			}
4635 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4636 				SCTP_SOCKET_UNLOCK(so, 1);
4637 				return (0);
4638 			}
4639 #endif
4640 			sctp_sorwakeup(inp, inp->sctp_socket);
4641 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4642 			SCTP_SOCKET_UNLOCK(so, 1);
4643 #endif
4644 		}
4645 	}
4646 	return (0);
4647 }
4648 
4649 
4650 
4651 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4652  *************ALTERNATE ROUTING CODE
4653  */
4654 
4655 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4656  *************ALTERNATE ROUTING CODE
4657  */
4658 
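/*
 * Build an mbuf carrying a generic error cause with the given cause code
 * and the string info as cause-specific data. Returns NULL if code is 0,
 * info is NULL, or no mbuf is available.
 */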
4659 struct mbuf *
4660 sctp_generate_cause(uint16_t code, char *info)
4661 {
4662 	struct mbuf *m;
4663 	struct sctp_gen_error_cause *cause;
4664 	size_t info_len, len;
4665 
4666 	if ((code == 0) || (info == NULL)) {
4667 		return (NULL);
4668 	}
4669 	info_len = strlen(info);
4670 	len = sizeof(struct sctp_paramhdr) + info_len;
4671 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4672 	if (m != NULL) {
4673 		SCTP_BUF_LEN(m) = len;
4674 		cause = mtod(m, struct sctp_gen_error_cause *);
4675 		cause->code = htons(code);
4676 		cause->length = htons((uint16_t) len);
4677 		memcpy(cause->info, info, info_len);
4678 	}
4679 	return (m);
4680 }
4681 
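/*
 * Build an mbuf carrying a "No User Data" error cause for the given TSN,
 * which is passed in network byte order.
 */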
4682 struct mbuf *
4683 sctp_generate_no_user_data_cause(uint32_t tsn)
4684 {
4685 	struct mbuf *m;
4686 	struct sctp_error_no_user_data *no_user_data_cause;
4687 	size_t len;
4688 
4689 	len = sizeof(struct sctp_error_no_user_data);
4690 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4691 	if (m != NULL) {
4692 		SCTP_BUF_LEN(m) = len;
4693 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4694 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4695 		no_user_data_cause->cause.length = htons((uint16_t) len);
4696 		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4697 	}
4698 	return (m);
4699 }
4700 
4701 #ifdef SCTP_MBCNT_LOGGING
4702 void
4703 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4704     struct sctp_tmit_chunk *tp1, int chk_cnt)
4705 {
4706 	if (tp1->data == NULL) {
4707 		return;
4708 	}
4709 	asoc->chunks_on_out_queue -= chk_cnt;
4710 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4711 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4712 		    asoc->total_output_queue_size,
4713 		    tp1->book_size,
4714 		    0,
4715 		    tp1->mbcnt);
4716 	}
4717 	if (asoc->total_output_queue_size >= tp1->book_size) {
4718 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4719 	} else {
4720 		asoc->total_output_queue_size = 0;
4721 	}
4722 
4723 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4724 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4725 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4726 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4727 		} else {
4728 			stcb->sctp_socket->so_snd.sb_cc = 0;
4729 
4730 		}
4731 	}
4732 }
4733 
4734 #endif
4735 
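/*
 * Abandon a PR-SCTP message: mark every fragment of the message containing
 * tp1 for FORWARD-TSN skipping, free its data, and notify the ULP.
 * Fragments still on the send queue are moved to the sent queue, and any
 * remainder on the stream output queue is discarded. Returns the number of
 * bytes released.
 */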
4736 int
4737 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4738     uint8_t sent, int so_locked
4739 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4740     SCTP_UNUSED
4741 #endif
4742 )
4743 {
4744 	struct sctp_stream_out *strq;
4745 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4746 	struct sctp_stream_queue_pending *sp;
4747 	uint16_t stream = 0, seq = 0;
4748 	uint8_t foundeom = 0;
4749 	int ret_sz = 0;
4750 	int notdone;
4751 	int do_wakeup_routine = 0;
4752 
4753 	stream = tp1->rec.data.stream_number;
4754 	seq = tp1->rec.data.stream_seq;
4755 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4756 		stcb->asoc.abandoned_sent[0]++;
4757 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4758 		stcb->asoc.strmout[stream].abandoned_sent[0]++;
4759 #if defined(SCTP_DETAILED_STR_STATS)
4760 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4761 #endif
4762 	} else {
4763 		stcb->asoc.abandoned_unsent[0]++;
4764 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4765 		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
4766 #if defined(SCTP_DETAILED_STR_STATS)
4767 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4768 #endif
4769 	}
4770 	do {
4771 		ret_sz += tp1->book_size;
4772 		if (tp1->data != NULL) {
4773 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4774 				sctp_flight_size_decrease(tp1);
4775 				sctp_total_flight_decrease(stcb, tp1);
4776 			}
4777 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4778 			stcb->asoc.peers_rwnd += tp1->send_size;
4779 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4780 			if (sent) {
4781 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4782 			} else {
4783 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4784 			}
4785 			if (tp1->data) {
4786 				sctp_m_freem(tp1->data);
4787 				tp1->data = NULL;
4788 			}
4789 			do_wakeup_routine = 1;
4790 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4791 				stcb->asoc.sent_queue_cnt_removeable--;
4792 			}
4793 		}
4794 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4795 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4796 		    SCTP_DATA_NOT_FRAG) {
4797 			/* not frag'ed, we are done */
4798 			notdone = 0;
4799 			foundeom = 1;
4800 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4801 			/* end of frag, we are done */
4802 			notdone = 0;
4803 			foundeom = 1;
4804 		} else {
4805 			/*
4806 			 * It's a begin or middle piece, we must mark all of
4807 			 * it
4808 			 */
4809 			notdone = 1;
4810 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4811 		}
4812 	} while (tp1 && notdone);
4813 	if (foundeom == 0) {
4814 		/*
4815 		 * The multi-part message was scattered across the send and
4816 		 * sent queue.
4817 		 */
4818 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4819 			if ((tp1->rec.data.stream_number != stream) ||
4820 			    (tp1->rec.data.stream_seq != seq)) {
4821 				break;
4822 			}
4823 			/*
4824 			 * save to chk in case we have some on stream out
4825 			 * queue. If so and we have an un-transmitted one we
4826 			 * don't have to fudge the TSN.
4827 			 */
4828 			chk = tp1;
4829 			ret_sz += tp1->book_size;
4830 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4831 			if (sent) {
4832 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4833 			} else {
4834 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4835 			}
4836 			if (tp1->data) {
4837 				sctp_m_freem(tp1->data);
4838 				tp1->data = NULL;
4839 			}
4840 			/* No flight involved here; book the size to 0 */
4841 			tp1->book_size = 0;
4842 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4843 				foundeom = 1;
4844 			}
4845 			do_wakeup_routine = 1;
4846 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4847 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4848 			/*
4849 			 * on to the sent queue so we can wait for it to be
4850 			 * passed by.
4851 			 */
4852 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4853 			    sctp_next);
4854 			stcb->asoc.send_queue_cnt--;
4855 			stcb->asoc.sent_queue_cnt++;
4856 		}
4857 	}
4858 	if (foundeom == 0) {
4859 		/*
4860 		 * Still no eom found. That means there is stuff left on the
4861 		 * stream out queue.. yuck.
4862 		 */
4863 		SCTP_TCB_SEND_LOCK(stcb);
4864 		strq = &stcb->asoc.strmout[stream];
4865 		sp = TAILQ_FIRST(&strq->outqueue);
4866 		if (sp != NULL) {
4867 			sp->discard_rest = 1;
4868 			/*
4869 			 * We may need to put a chunk on the queue that
4870 			 * holds the TSN that would have been sent with the
4871 			 * LAST bit.
4872 			 */
4873 			if (chk == NULL) {
4874 				/* Yep, we have to */
4875 				sctp_alloc_a_chunk(stcb, chk);
4876 				if (chk == NULL) {
4877 					/*
4878 					 * we are hosed. All we can do is
4879 					 * nothing.. which will cause an
4880 					 * abort if the peer is paying
4881 					 * attention.
4882 					 */
4883 					goto oh_well;
4884 				}
4885 				memset(chk, 0, sizeof(*chk));
4886 				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4887 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4888 				chk->asoc = &stcb->asoc;
4889 				chk->rec.data.stream_seq = strq->next_sequence_send;
4890 				chk->rec.data.stream_number = sp->stream;
4891 				chk->rec.data.payloadtype = sp->ppid;
4892 				chk->rec.data.context = sp->context;
4893 				chk->flags = sp->act_flags;
4894 				chk->whoTo = NULL;
4895 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4896 				strq->chunks_on_queues++;
4897 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4898 				stcb->asoc.sent_queue_cnt++;
4899 				stcb->asoc.pr_sctp_cnt++;
4900 			} else {
4901 				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4902 			}
4903 			strq->next_sequence_send++;
4904 	oh_well:
4905 			if (sp->data) {
4906 				/*
4907 				 * Pull any data to free up the SB and allow
4908 				 * sender to "add more" while we will throw
4909 				 * away :-)
4910 				 */
4911 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4912 				ret_sz += sp->length;
4913 				do_wakeup_routine = 1;
4914 				sp->some_taken = 1;
4915 				sctp_m_freem(sp->data);
4916 				sp->data = NULL;
4917 				sp->tail_mbuf = NULL;
4918 				sp->length = 0;
4919 			}
4920 		}
4921 		SCTP_TCB_SEND_UNLOCK(stcb);
4922 	}
4923 	if (do_wakeup_routine) {
4924 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4925 		struct socket *so;
4926 
4927 		so = SCTP_INP_SO(stcb->sctp_ep);
4928 		if (!so_locked) {
4929 			atomic_add_int(&stcb->asoc.refcnt, 1);
4930 			SCTP_TCB_UNLOCK(stcb);
4931 			SCTP_SOCKET_LOCK(so, 1);
4932 			SCTP_TCB_LOCK(stcb);
4933 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4934 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4935 				/* assoc was freed while we were unlocked */
4936 				SCTP_SOCKET_UNLOCK(so, 1);
4937 				return (ret_sz);
4938 			}
4939 		}
4940 #endif
4941 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4942 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4943 		if (!so_locked) {
4944 			SCTP_SOCKET_UNLOCK(so, 1);
4945 		}
4946 #endif
4947 	}
4948 	return (ret_sz);
4949 }
4950 
4951 /*
4952  * Checks to see if the given address, addr, is one that is currently known
4953  * by the kernel. Note: can't distinguish the same address on multiple
4954  * interfaces and doesn't handle multiple addresses with different
4955  * zone/scope ids. Note: ifa_ifwithaddr() compares the entire sockaddr struct.
4956  */
4957 struct sctp_ifa *
4958 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4959     int holds_lock)
4960 {
4961 	struct sctp_laddr *laddr;
4962 
4963 	if (holds_lock == 0) {
4964 		SCTP_INP_RLOCK(inp);
4965 	}
4966 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4967 		if (laddr->ifa == NULL)
4968 			continue;
4969 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4970 			continue;
4971 #ifdef INET
4972 		if (addr->sa_family == AF_INET) {
4973 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4974 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4975 				/* found him. */
4976 				if (holds_lock == 0) {
4977 					SCTP_INP_RUNLOCK(inp);
4978 				}
4979 				return (laddr->ifa);
4980 				break;
4981 			}
4982 		}
4983 #endif
4984 #ifdef INET6
4985 		if (addr->sa_family == AF_INET6) {
4986 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4987 			    &laddr->ifa->address.sin6)) {
4988 				/* found him. */
4989 				if (holds_lock == 0) {
4990 					SCTP_INP_RUNLOCK(inp);
4991 				}
4992 				return (laddr->ifa);
4993 				break;
4994 			}
4995 		}
4996 #endif
4997 	}
4998 	if (holds_lock == 0) {
4999 		SCTP_INP_RUNLOCK(inp);
5000 	}
5001 	return (NULL);
5002 }
5003 
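/*
 * Compute the hash value used to place an address in the per-VRF address
 * hash table.
 */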
5004 uint32_t
5005 sctp_get_ifa_hash_val(struct sockaddr *addr)
5006 {
5007 	switch (addr->sa_family) {
5008 #ifdef INET
5009 	case AF_INET:
5010 		{
5011 			struct sockaddr_in *sin;
5012 
5013 			sin = (struct sockaddr_in *)addr;
5014 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5015 		}
5016 #endif
5017 #ifdef INET6
5018 	case AF_INET6:
5019 		{
5020 			struct sockaddr_in6 *sin6;
5021 			uint32_t hash_of_addr;
5022 
5023 			sin6 = (struct sockaddr_in6 *)addr;
5024 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5025 			    sin6->sin6_addr.s6_addr32[1] +
5026 			    sin6->sin6_addr.s6_addr32[2] +
5027 			    sin6->sin6_addr.s6_addr32[3]);
5028 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5029 			return (hash_of_addr);
5030 		}
5031 #endif
5032 	default:
5033 		break;
5034 	}
5035 	return (0);
5036 }
5037 
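/*
 * Look up an address in the given VRF's address hash table and return the
 * matching sctp_ifa, or NULL if the address is not known to the stack.
 */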
5038 struct sctp_ifa *
5039 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5040 {
5041 	struct sctp_ifa *sctp_ifap;
5042 	struct sctp_vrf *vrf;
5043 	struct sctp_ifalist *hash_head;
5044 	uint32_t hash_of_addr;
5045 
5046 	if (holds_lock == 0)
5047 		SCTP_IPI_ADDR_RLOCK();
5048 
5049 	vrf = sctp_find_vrf(vrf_id);
5050 	if (vrf == NULL) {
5051 		if (holds_lock == 0)
5052 			SCTP_IPI_ADDR_RUNLOCK();
5053 		return (NULL);
5054 	}
5055 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5056 
5057 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5058 	if (hash_head == NULL) {
5059 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5060 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5061 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5062 		sctp_print_address(addr);
5063 		SCTP_PRINTF("No such bucket for address\n");
5064 		if (holds_lock == 0)
5065 			SCTP_IPI_ADDR_RUNLOCK();
5066 
5067 		return (NULL);
5068 	}
5069 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5070 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5071 			continue;
5072 #ifdef INET
5073 		if (addr->sa_family == AF_INET) {
5074 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5075 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5076 				/* found him. */
5077 				if (holds_lock == 0)
5078 					SCTP_IPI_ADDR_RUNLOCK();
5079 				return (sctp_ifap);
5080 				break;
5081 			}
5082 		}
5083 #endif
5084 #ifdef INET6
5085 		if (addr->sa_family == AF_INET6) {
5086 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5087 			    &sctp_ifap->address.sin6)) {
5088 				/* found him. */
5089 				if (holds_lock == 0)
5090 					SCTP_IPI_ADDR_RUNLOCK();
5091 				return (sctp_ifap);
5092 				break;
5093 			}
5094 		}
5095 #endif
5096 	}
5097 	if (holds_lock == 0)
5098 		SCTP_IPI_ADDR_RUNLOCK();
5099 	return (NULL);
5100 }
5101 
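/*
 * Called as the user consumes data from the receive buffer. If the receive
 * window has opened by at least rwnd_req since the last report, send a
 * window update SACK right away; otherwise just record how much has been
 * freed so far.
 */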
5102 static void
5103 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
5104     uint32_t rwnd_req)
5105 {
5106 	/* User pulled some data, do we need a rwnd update? */
5107 	int r_unlocked = 0;
5108 	uint32_t dif, rwnd;
5109 	struct socket *so = NULL;
5110 
5111 	if (stcb == NULL)
5112 		return;
5113 
5114 	atomic_add_int(&stcb->asoc.refcnt, 1);
5115 
5116 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5117 	    SCTP_STATE_SHUTDOWN_RECEIVED |
5118 	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5119 		/* Pre-check: if we are freeing, no update */
5120 		goto no_lock;
5121 	}
5122 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5123 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5124 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5125 		goto out;
5126 	}
5127 	so = stcb->sctp_socket;
5128 	if (so == NULL) {
5129 		goto out;
5130 	}
5131 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5132 	/* Have you freed enough to look? */
5133 	*freed_so_far = 0;
5134 	/* Yep, it's worth a look and the lock overhead */
5135 
5136 	/* Figure out what the rwnd would be */
5137 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5138 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5139 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5140 	} else {
5141 		dif = 0;
5142 	}
5143 	if (dif >= rwnd_req) {
5144 		if (hold_rlock) {
5145 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5146 			r_unlocked = 1;
5147 		}
5148 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5149 			/*
5150 			 * One last check before we allow the guy possibly
5151 			 * to get in. There is a race, where the guy has not
5152 			 * reached the gate. In that case we simply skip the update.
5153 			 */
5154 			goto out;
5155 		}
5156 		SCTP_TCB_LOCK(stcb);
5157 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5158 			/* No reports here */
5159 			SCTP_TCB_UNLOCK(stcb);
5160 			goto out;
5161 		}
5162 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5163 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5164 
5165 		sctp_chunk_output(stcb->sctp_ep, stcb,
5166 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5167 		/* make sure no timer is running */
5168 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5169 		SCTP_TCB_UNLOCK(stcb);
5170 	} else {
5171 		/* Update how much we have pending */
5172 		stcb->freed_by_sorcv_sincelast = dif;
5173 	}
5174 out:
5175 	if (so && r_unlocked && hold_rlock) {
5176 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5177 	}
5178 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5179 no_lock:
5180 	atomic_add_int(&stcb->asoc.refcnt, -1);
5181 	return;
5182 }
5183 
5184 int
5185 sctp_sorecvmsg(struct socket *so,
5186     struct uio *uio,
5187     struct mbuf **mp,
5188     struct sockaddr *from,
5189     int fromlen,
5190     int *msg_flags,
5191     struct sctp_sndrcvinfo *sinfo,
5192     int filling_sinfo)
5193 {
5194 	/*
5195 	 * MSG flags we will look at: MSG_DONTWAIT - non-blocking IO.
5196 	 * MSG_PEEK - Look, don't touch :-D (only valid without an mbuf copy,
5197 	 * i.e. mp == NULL, so uio is the copy method to userland). MSG_WAITALL - ??
5198 	 * On the way out we may send out any combination of:
5199 	 * MSG_NOTIFICATION and MSG_EOR.
5200 	 *
5201 	 */
5202 	struct sctp_inpcb *inp = NULL;
5203 	int my_len = 0;
5204 	int cp_len = 0, error = 0;
5205 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5206 	struct mbuf *m = NULL;
5207 	struct sctp_tcb *stcb = NULL;
5208 	int wakeup_read_socket = 0;
5209 	int freecnt_applied = 0;
5210 	int out_flags = 0, in_flags = 0;
5211 	int block_allowed = 1;
5212 	uint32_t freed_so_far = 0;
5213 	uint32_t copied_so_far = 0;
5214 	int in_eeor_mode = 0;
5215 	int no_rcv_needed = 0;
5216 	uint32_t rwnd_req = 0;
5217 	int hold_sblock = 0;
5218 	int hold_rlock = 0;
5219 	int slen = 0;
5220 	uint32_t held_length = 0;
5221 	int sockbuf_lock = 0;
5222 
5223 	if (uio == NULL) {
5224 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5225 		return (EINVAL);
5226 	}
5227 	if (msg_flags) {
5228 		in_flags = *msg_flags;
5229 		if (in_flags & MSG_PEEK)
5230 			SCTP_STAT_INCR(sctps_read_peeks);
5231 	} else {
5232 		in_flags = 0;
5233 	}
5234 	slen = uio->uio_resid;
5235 
5236 	/* Pull in and set up our int flags */
5237 	if (in_flags & MSG_OOB) {
5238 		/* Out of band data is NOT supported */
5239 		return (EOPNOTSUPP);
5240 	}
5241 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5242 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5243 		return (EINVAL);
5244 	}
5245 	if ((in_flags & (MSG_DONTWAIT
5246 	    | MSG_NBIO
5247 	    )) ||
5248 	    SCTP_SO_IS_NBIO(so)) {
5249 		block_allowed = 0;
5250 	}
5251 	/* setup the endpoint */
5252 	inp = (struct sctp_inpcb *)so->so_pcb;
5253 	if (inp == NULL) {
5254 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5255 		return (EFAULT);
5256 	}
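	/*
	 * rwnd_req is how many bytes the reader must free before we bother
	 * asking sctp_user_rcvd() to send a window-update SACK: a fraction
	 * of the receive buffer, floored at SCTP_MIN_RWND below.
	 */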
5257 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5258 	/* Must be at least an MTU's worth */
5259 	if (rwnd_req < SCTP_MIN_RWND)
5260 		rwnd_req = SCTP_MIN_RWND;
5261 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5262 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5263 		sctp_misc_ints(SCTP_SORECV_ENTER,
5264 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5265 	}
5266 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5267 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5268 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5269 	}
5270 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5271 	if (error) {
5272 		goto release_unlocked;
5273 	}
5274 	sockbuf_lock = 1;
5275 restart:
5276 
5277 
5278 restart_nosblocks:
5279 	if (hold_sblock == 0) {
5280 		SOCKBUF_LOCK(&so->so_rcv);
5281 		hold_sblock = 1;
5282 	}
5283 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5284 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5285 		goto out;
5286 	}
5287 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5288 		if (so->so_error) {
5289 			error = so->so_error;
5290 			if ((in_flags & MSG_PEEK) == 0)
5291 				so->so_error = 0;
5292 			goto out;
5293 		} else {
5294 			if (so->so_rcv.sb_cc == 0) {
5295 				/* indicate EOF */
5296 				error = 0;
5297 				goto out;
5298 			}
5299 		}
5300 	}
5301 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5302 		/* we need to wait for data */
5303 		if ((so->so_rcv.sb_cc == 0) &&
5304 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5305 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5306 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5307 				/*
5308 				 * For the active open side clear flags for
5309 				 * re-use; the passive open side is blocked
5310 				 * by connect.
5311 				 */
5312 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5313 					/*
5314 					 * You were aborted, passive side
5315 					 * always hits here
5316 					 */
5317 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5318 					error = ECONNRESET;
5319 				}
5320 				so->so_state &= ~(SS_ISCONNECTING |
5321 				    SS_ISDISCONNECTING |
5322 				    SS_ISCONFIRMING |
5323 				    SS_ISCONNECTED);
5324 				if (error == 0) {
5325 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5326 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5327 						error = ENOTCONN;
5328 					}
5329 				}
5330 				goto out;
5331 			}
5332 		}
5333 		error = sbwait(&so->so_rcv);
5334 		if (error) {
5335 			goto out;
5336 		}
5337 		held_length = 0;
5338 		goto restart_nosblocks;
5339 	} else if (so->so_rcv.sb_cc == 0) {
5340 		if (so->so_error) {
5341 			error = so->so_error;
5342 			if ((in_flags & MSG_PEEK) == 0)
5343 				so->so_error = 0;
5344 		} else {
5345 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5346 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5347 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5348 					/*
5349 					 * For the active open side clear
5350 					 * flags for re-use; the passive open
5351 					 * side is blocked by connect.
5352 					 */
5353 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5354 						/*
5355 						 * You were aborted, passive
5356 						 * side always hits here
5357 						 */
5358 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5359 						error = ECONNRESET;
5360 					}
5361 					so->so_state &= ~(SS_ISCONNECTING |
5362 					    SS_ISDISCONNECTING |
5363 					    SS_ISCONFIRMING |
5364 					    SS_ISCONNECTED);
5365 					if (error == 0) {
5366 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5367 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5368 							error = ENOTCONN;
5369 						}
5370 					}
5371 					goto out;
5372 				}
5373 			}
5374 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5375 			error = EWOULDBLOCK;
5376 		}
5377 		goto out;
5378 	}
5379 	if (hold_sblock == 1) {
5380 		SOCKBUF_UNLOCK(&so->so_rcv);
5381 		hold_sblock = 0;
5382 	}
5383 	/* we possibly have data we can read */
5384 	/* sa_ignore FREED_MEMORY */
5385 	control = TAILQ_FIRST(&inp->read_queue);
5386 	if (control == NULL) {
5387 		/*
5388 		 * This could be happening since the appender did the
5389 		 * increment but has not yet done the tailq insert onto
5390 		 * the read_queue.
5391 		 */
5392 		if (hold_rlock == 0) {
5393 			SCTP_INP_READ_LOCK(inp);
5394 		}
5395 		control = TAILQ_FIRST(&inp->read_queue);
5396 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5397 #ifdef INVARIANTS
5398 			panic("Huh, its non zero and nothing on control?");
5399 #endif
5400 			so->so_rcv.sb_cc = 0;
5401 		}
5402 		SCTP_INP_READ_UNLOCK(inp);
5403 		hold_rlock = 0;
5404 		goto restart;
5405 	}
5406 	if ((control->length == 0) &&
5407 	    (control->do_not_ref_stcb)) {
5408 		/*
5409 		 * Clean up code for freeing assoc that left behind a
5410 		 * pdapi.. maybe a peer in EEOR that just closed after
5411 		 * sending and never indicated an EOR.
5412 		 */
5413 		if (hold_rlock == 0) {
5414 			hold_rlock = 1;
5415 			SCTP_INP_READ_LOCK(inp);
5416 		}
5417 		control->held_length = 0;
5418 		if (control->data) {
5419 			/* Hmm there is data here .. fix */
5420 			struct mbuf *m_tmp;
5421 			int cnt = 0;
5422 
5423 			m_tmp = control->data;
5424 			while (m_tmp) {
5425 				cnt += SCTP_BUF_LEN(m_tmp);
5426 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5427 					control->tail_mbuf = m_tmp;
5428 					control->end_added = 1;
5429 				}
5430 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5431 			}
5432 			control->length = cnt;
5433 		} else {
5434 			/* remove it */
5435 			TAILQ_REMOVE(&inp->read_queue, control, next);
5436 			/* Add back any hidden data */
5437 			sctp_free_remote_addr(control->whoFrom);
5438 			sctp_free_a_readq(stcb, control);
5439 		}
5440 		if (hold_rlock) {
5441 			hold_rlock = 0;
5442 			SCTP_INP_READ_UNLOCK(inp);
5443 		}
5444 		goto restart;
5445 	}
5446 	if ((control->length == 0) &&
5447 	    (control->end_added == 1)) {
5448 		/*
5449 		 * Do we also need to check for (control->pdapi_aborted ==
5450 		 * 1)?
5451 		 */
5452 		if (hold_rlock == 0) {
5453 			hold_rlock = 1;
5454 			SCTP_INP_READ_LOCK(inp);
5455 		}
5456 		TAILQ_REMOVE(&inp->read_queue, control, next);
5457 		if (control->data) {
5458 #ifdef INVARIANTS
5459 			panic("control->data not null but control->length == 0");
5460 #else
5461 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5462 			sctp_m_freem(control->data);
5463 			control->data = NULL;
5464 #endif
5465 		}
5466 		if (control->aux_data) {
5467 			sctp_m_free(control->aux_data);
5468 			control->aux_data = NULL;
5469 		}
5470 		sctp_free_remote_addr(control->whoFrom);
5471 		sctp_free_a_readq(stcb, control);
5472 		if (hold_rlock) {
5473 			hold_rlock = 0;
5474 			SCTP_INP_READ_UNLOCK(inp);
5475 		}
5476 		goto restart;
5477 	}
5478 	if (control->length == 0) {
5479 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5480 		    (filling_sinfo)) {
5481 			/* find a more suitable one than this */
5482 			ctl = TAILQ_NEXT(control, next);
5483 			while (ctl) {
5484 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5485 				    (ctl->some_taken ||
5486 				    (ctl->spec_flags & M_NOTIFICATION) ||
5487 				    ((ctl->do_not_ref_stcb == 0) &&
5488 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5489 				    ) {
5490 					/*-
5491 					 * If we have a different TCB next, and there is data
5492 					 * present, and we have already taken some (pdapi) OR we can
5493 					 * ref the tcb and no delivery has started on this stream, we
5494 					 * take it. Note we allow a notification on a different
5495 					 * assoc to be delivered.
5496 					 */
5497 					control = ctl;
5498 					goto found_one;
5499 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5500 					    (ctl->length) &&
5501 					    ((ctl->some_taken) ||
5502 					    ((ctl->do_not_ref_stcb == 0) &&
5503 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5504 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5505 					/*-
5506 					 * If we have the same tcb, and there is data present, and we
5507 					 * have the stream interleave feature present, then if we have
5508 					 * taken some (pdapi) or we can refer to that tcb AND we have
5509 					 * not started a delivery for this stream, we can take it.
5510 					 * Note we do NOT allow a notification on the same assoc to
5511 					 * be delivered.
5512 					 */
5513 					control = ctl;
5514 					goto found_one;
5515 				}
5516 				ctl = TAILQ_NEXT(ctl, next);
5517 			}
5518 		}
5519 		/*
5520 		 * if we reach here, no suitable replacement is available
5521 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5522 		 * into our held count, and it's time to sleep again.
5523 		 */
5524 		held_length = so->so_rcv.sb_cc;
5525 		control->held_length = so->so_rcv.sb_cc;
5526 		goto restart;
5527 	}
5528 	/* Clear the held length since there is something to read */
5529 	control->held_length = 0;
5530 	if (hold_rlock) {
5531 		SCTP_INP_READ_UNLOCK(inp);
5532 		hold_rlock = 0;
5533 	}
5534 found_one:
5535 	/*
5536 	 * If we reach here, control has some data for us to read off.
5537 	 * Note that stcb COULD be NULL.
5538 	 */
5539 	control->some_taken++;
5540 	if (hold_sblock) {
5541 		SOCKBUF_UNLOCK(&so->so_rcv);
5542 		hold_sblock = 0;
5543 	}
5544 	stcb = control->stcb;
5545 	if (stcb) {
5546 		if ((control->do_not_ref_stcb == 0) &&
5547 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5548 			if (freecnt_applied == 0)
5549 				stcb = NULL;
5550 		} else if (control->do_not_ref_stcb == 0) {
5551 			/* you can't free it on me please */
5552 			/*
5553 			 * The lock on the socket buffer protects us so the
5554 			 * free code will stop. But since we used the
5555 			 * socketbuf lock and the sender uses the tcb_lock
5556 			 * to increment, we need to use the atomic add to
5557 			 * the refcnt
5558 			 */
5559 			if (freecnt_applied) {
5560 #ifdef INVARIANTS
5561 				panic("refcnt already incremented");
5562 #else
5563 				SCTP_PRINTF("refcnt already incremented?\n");
5564 #endif
5565 			} else {
5566 				atomic_add_int(&stcb->asoc.refcnt, 1);
5567 				freecnt_applied = 1;
5568 			}
5569 			/*
5570 			 * Setup to remember how much we have not yet told
5571 			 * the peer our rwnd has opened up. Note we grab the
5572 			 * value from the tcb from last time. Note too that
5573 			 * sack sending clears this when a sack is sent,
5574 			 * which is fine. Once we hit the rwnd_req, we then
5575 			 * will go to the sctp_user_rcvd() that will not
5576 			 * lock until it KNOWs it MUST send a WUP-SACK.
5577 			 */
5578 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5579 			stcb->freed_by_sorcv_sincelast = 0;
5580 		}
5581 	}
5582 	if (stcb &&
5583 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5584 	    control->do_not_ref_stcb == 0) {
5585 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5586 	}
5587 	/* First let's get off the sinfo and sockaddr info */
5588 	if ((sinfo) && filling_sinfo) {
5589 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5590 		nxt = TAILQ_NEXT(control, next);
5591 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5592 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5593 			struct sctp_extrcvinfo *s_extra;
5594 
5595 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5596 			if ((nxt) &&
5597 			    (nxt->length)) {
5598 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5599 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5600 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5601 				}
5602 				if (nxt->spec_flags & M_NOTIFICATION) {
5603 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5604 				}
5605 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5606 				s_extra->sreinfo_next_length = nxt->length;
5607 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5608 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5609 				if (nxt->tail_mbuf != NULL) {
5610 					if (nxt->end_added) {
5611 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5612 					}
5613 				}
5614 			} else {
5615 				/*
5616 				 * we explicitly zero this, since the memcpy
5617 				 * picked up some other things beyond the
5618 				 * older sinfo_ fields that are on the
5619 				 * control structure :-D
5620 				 */
5621 				nxt = NULL;
5622 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5623 				s_extra->sreinfo_next_aid = 0;
5624 				s_extra->sreinfo_next_length = 0;
5625 				s_extra->sreinfo_next_ppid = 0;
5626 				s_extra->sreinfo_next_stream = 0;
5627 			}
5628 		}
5629 		/*
5630 		 * update off the real current cum-ack, if we have an stcb.
5631 		 */
5632 		if ((control->do_not_ref_stcb == 0) && stcb)
5633 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5634 		/*
5635 		 * mask off the high bits, we keep the actual chunk bits in
5636 		 * there.
5637 		 */
5638 		sinfo->sinfo_flags &= 0x00ff;
5639 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5640 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5641 		}
5642 	}
5643 #ifdef SCTP_ASOCLOG_OF_TSNS
5644 	{
5645 		int index, newindex;
5646 		struct sctp_pcbtsn_rlog *entry;
5647 
5648 		do {
5649 			index = inp->readlog_index;
5650 			newindex = index + 1;
5651 			if (newindex >= SCTP_READ_LOG_SIZE) {
5652 				newindex = 0;
5653 			}
5654 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5655 		entry = &inp->readlog[index];
5656 		entry->vtag = control->sinfo_assoc_id;
5657 		entry->strm = control->sinfo_stream;
5658 		entry->seq = control->sinfo_ssn;
5659 		entry->sz = control->length;
5660 		entry->flgs = control->sinfo_flags;
5661 	}
5662 #endif
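	/*
	 * Copy out the sender's address for the caller, converting an IPv4
	 * source into a v4-mapped IPv6 address when the application has
	 * asked for mapped addresses.
	 */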
5663 	if ((fromlen > 0) && (from != NULL)) {
5664 		union sctp_sockstore store;
5665 		size_t len;
5666 
5667 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5668 #ifdef INET6
5669 		case AF_INET6:
5670 			len = sizeof(struct sockaddr_in6);
5671 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5672 			store.sin6.sin6_port = control->port_from;
5673 			break;
5674 #endif
5675 #ifdef INET
5676 		case AF_INET:
5677 #ifdef INET6
5678 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5679 				len = sizeof(struct sockaddr_in6);
5680 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5681 				    &store.sin6);
5682 				store.sin6.sin6_port = control->port_from;
5683 			} else {
5684 				len = sizeof(struct sockaddr_in);
5685 				store.sin = control->whoFrom->ro._l_addr.sin;
5686 				store.sin.sin_port = control->port_from;
5687 			}
5688 #else
5689 			len = sizeof(struct sockaddr_in);
5690 			store.sin = control->whoFrom->ro._l_addr.sin;
5691 			store.sin.sin_port = control->port_from;
5692 #endif
5693 			break;
5694 #endif
5695 		default:
5696 			len = 0;
5697 			break;
5698 		}
5699 		memcpy(from, &store, min((size_t)fromlen, len));
5700 #ifdef INET6
5701 		{
5702 			struct sockaddr_in6 lsa6, *from6;
5703 
5704 			from6 = (struct sockaddr_in6 *)from;
5705 			sctp_recover_scope_mac(from6, (&lsa6));
5706 		}
5707 #endif
5708 	}
5709 	/* now copy out what data we can */
5710 	if (mp == NULL) {
5711 		/* copy out each mbuf in the chain up to length */
5712 get_more_data:
5713 		m = control->data;
5714 		while (m) {
5715 			/* Move out all we can */
5716 			cp_len = (int)uio->uio_resid;
5717 			my_len = (int)SCTP_BUF_LEN(m);
5718 			if (cp_len > my_len) {
5719 				/* not enough in this buf */
5720 				cp_len = my_len;
5721 			}
5722 			if (hold_rlock) {
5723 				SCTP_INP_READ_UNLOCK(inp);
5724 				hold_rlock = 0;
5725 			}
5726 			if (cp_len > 0)
5727 				error = uiomove(mtod(m, char *), cp_len, uio);
5728 			/* re-read */
5729 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5730 				goto release;
5731 			}
5732 			if ((control->do_not_ref_stcb == 0) && stcb &&
5733 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5734 				no_rcv_needed = 1;
5735 			}
5736 			if (error) {
5737 				/* error we are out of here */
5738 				goto release;
5739 			}
5740 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5741 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5742 			    ((control->end_added == 0) ||
5743 			    (control->end_added &&
5744 			    (TAILQ_NEXT(control, next) == NULL)))
5745 			    ) {
5746 				SCTP_INP_READ_LOCK(inp);
5747 				hold_rlock = 1;
5748 			}
5749 			if (cp_len == SCTP_BUF_LEN(m)) {
5750 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5751 				    (control->end_added)) {
5752 					out_flags |= MSG_EOR;
5753 					if ((control->do_not_ref_stcb == 0) &&
5754 					    (control->stcb != NULL) &&
5755 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5756 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5757 				}
5758 				if (control->spec_flags & M_NOTIFICATION) {
5759 					out_flags |= MSG_NOTIFICATION;
5760 				}
5761 				/* we ate up the mbuf */
5762 				if (in_flags & MSG_PEEK) {
5763 					/* just looking */
5764 					m = SCTP_BUF_NEXT(m);
5765 					copied_so_far += cp_len;
5766 				} else {
5767 					/* dispose of the mbuf */
5768 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5769 						sctp_sblog(&so->so_rcv,
5770 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5771 					}
5772 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5773 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5774 						sctp_sblog(&so->so_rcv,
5775 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5776 					}
5777 					copied_so_far += cp_len;
5778 					freed_so_far += cp_len;
5779 					freed_so_far += MSIZE;
5780 					atomic_subtract_int(&control->length, cp_len);
5781 					control->data = sctp_m_free(m);
5782 					m = control->data;
5783 					/*
5784 					 * been through it all; must hold the
5785 					 * sb lock, so it is ok to null tail
5786 					 */
5787 					if (control->data == NULL) {
5788 #ifdef INVARIANTS
5789 						if ((control->end_added == 0) ||
5790 						    (TAILQ_NEXT(control, next) == NULL)) {
5791 							/*
5792 							 * If the end is not
5793 							 * added, OR the
5794 							 * next is NULL,
5795 							 * we MUST have the
5796 							 * lock.
5797 							 */
5798 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5799 								panic("Hmm we don't own the lock?");
5800 							}
5801 						}
5802 #endif
5803 						control->tail_mbuf = NULL;
5804 #ifdef INVARIANTS
5805 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5806 							panic("end_added, nothing left and no MSG_EOR");
5807 						}
5808 #endif
5809 					}
5810 				}
5811 			} else {
5812 				/* Do we need to trim the mbuf? */
5813 				if (control->spec_flags & M_NOTIFICATION) {
5814 					out_flags |= MSG_NOTIFICATION;
5815 				}
5816 				if ((in_flags & MSG_PEEK) == 0) {
5817 					SCTP_BUF_RESV_UF(m, cp_len);
5818 					SCTP_BUF_LEN(m) -= cp_len;
5819 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5820 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5821 					}
5822 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5823 					if ((control->do_not_ref_stcb == 0) &&
5824 					    stcb) {
5825 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5826 					}
5827 					copied_so_far += cp_len;
5828 					freed_so_far += cp_len;
5829 					freed_so_far += MSIZE;
5830 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5831 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5832 						    SCTP_LOG_SBRESULT, 0);
5833 					}
5834 					atomic_subtract_int(&control->length, cp_len);
5835 				} else {
5836 					copied_so_far += cp_len;
5837 				}
5838 			}
5839 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5840 				break;
5841 			}
5842 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5843 			    (control->do_not_ref_stcb == 0) &&
5844 			    (freed_so_far >= rwnd_req)) {
5845 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5846 			}
5847 		}		/* end while(m) */
5848 		/*
5849 		 * At this point we have looked at it all and we either have
5850 		 * a MSG_EOR/or read all the user wants... <OR>
5851 		 * control->length == 0.
5852 		 */
5853 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5854 			/* we are done with this control */
5855 			if (control->length == 0) {
5856 				if (control->data) {
5857 #ifdef INVARIANTS
5858 					panic("control->data not null at read eor?");
5859 #else
5860 					SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n");
5861 					sctp_m_freem(control->data);
5862 					control->data = NULL;
5863 #endif
5864 				}
5865 		done_with_control:
5866 				if (TAILQ_NEXT(control, next) == NULL) {
5867 					/*
5868 					 * If we don't have a next we need
5869 					 * the lock; if there is a next, the
5870 					 * interrupt is filling ahead of us
5871 					 * and we don't need a lock to
5872 					 * remove this entry (which is the
5873 					 * head of the queue).
5874 					 */
5875 					if (hold_rlock == 0) {
5876 						SCTP_INP_READ_LOCK(inp);
5877 						hold_rlock = 1;
5878 					}
5879 				}
5880 				TAILQ_REMOVE(&inp->read_queue, control, next);
5881 				/* Add back any hidden data */
5882 				if (control->held_length) {
5883 					held_length = 0;
5884 					control->held_length = 0;
5885 					wakeup_read_socket = 1;
5886 				}
5887 				if (control->aux_data) {
5888 					sctp_m_free(control->aux_data);
5889 					control->aux_data = NULL;
5890 				}
5891 				no_rcv_needed = control->do_not_ref_stcb;
5892 				sctp_free_remote_addr(control->whoFrom);
5893 				control->data = NULL;
5894 				sctp_free_a_readq(stcb, control);
5895 				control = NULL;
5896 				if ((freed_so_far >= rwnd_req) &&
5897 				    (no_rcv_needed == 0))
5898 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5899 
5900 			} else {
5901 				/*
5902 				 * The user did not read all of this
5903 				 * message, turn off the returned MSG_EOR
5904 				 * since we are leaving more behind on the
5905 				 * control to read.
5906 				 */
5907 #ifdef INVARIANTS
5908 				if (control->end_added &&
5909 				    (control->data == NULL) &&
5910 				    (control->tail_mbuf == NULL)) {
5911 					panic("Gak, control->length is corrupt?");
5912 				}
5913 #endif
5914 				no_rcv_needed = control->do_not_ref_stcb;
5915 				out_flags &= ~MSG_EOR;
5916 			}
5917 		}
5918 		if (out_flags & MSG_EOR) {
5919 			goto release;
5920 		}
5921 		if ((uio->uio_resid == 0) ||
5922 		    ((in_eeor_mode) &&
5923 		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5924 			goto release;
5925 		}
5926 		/*
5927 		 * If I hit here the receiver wants more and this message is
5928 		 * NOT done (pd-api). So two questions: Can we block? If not,
5929 		 * we are done. Did the user NOT set MSG_WAITALL?
5930 		 */
5931 		if (block_allowed == 0) {
5932 			goto release;
5933 		}
5934 		/*
5935 		 * We need to wait for more data; a few things: - We don't
5936 		 * sbunlock() so we don't get someone else reading. - We
5937 		 * must be sure to account for the case where what is added
5938 		 * is NOT to our control when we wake up.
5939 		 */
5940 
5941 		/*
5942 		 * Do we need to tell the transport a rwnd update might be
5943 		 * needed before we go to sleep?
5944 		 */
5945 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5946 		    ((freed_so_far >= rwnd_req) &&
5947 		    (control->do_not_ref_stcb == 0) &&
5948 		    (no_rcv_needed == 0))) {
5949 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5950 		}
5951 wait_some_more:
5952 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5953 			goto release;
5954 		}
5955 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5956 			goto release;
5957 
5958 		if (hold_rlock == 1) {
5959 			SCTP_INP_READ_UNLOCK(inp);
5960 			hold_rlock = 0;
5961 		}
5962 		if (hold_sblock == 0) {
5963 			SOCKBUF_LOCK(&so->so_rcv);
5964 			hold_sblock = 1;
5965 		}
5966 		if ((copied_so_far) && (control->length == 0) &&
5967 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5968 			goto release;
5969 		}
5970 		if (so->so_rcv.sb_cc <= control->held_length) {
5971 			error = sbwait(&so->so_rcv);
5972 			if (error) {
5973 				goto release;
5974 			}
5975 			control->held_length = 0;
5976 		}
5977 		if (hold_sblock) {
5978 			SOCKBUF_UNLOCK(&so->so_rcv);
5979 			hold_sblock = 0;
5980 		}
5981 		if (control->length == 0) {
5982 			/* still nothing here */
5983 			if (control->end_added == 1) {
5984 				/* he aborted, or is done, i.e. did a shutdown */
5985 				out_flags |= MSG_EOR;
5986 				if (control->pdapi_aborted) {
5987 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5988 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5989 
5990 					out_flags |= MSG_TRUNC;
5991 				} else {
5992 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5993 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5994 				}
5995 				goto done_with_control;
5996 			}
5997 			if (so->so_rcv.sb_cc > held_length) {
5998 				control->held_length = so->so_rcv.sb_cc;
5999 				held_length = 0;
6000 			}
6001 			goto wait_some_more;
6002 		} else if (control->data == NULL) {
6003 			/*
6004 			 * we must re-sync since data is probably being
6005 			 * added
6006 			 */
6007 			SCTP_INP_READ_LOCK(inp);
6008 			if ((control->length > 0) && (control->data == NULL)) {
6009 				/*
6010 				 * big trouble.. we have the lock and it's
6011 				 * corrupt?
6012 				 */
6013 #ifdef INVARIANTS
6014 				panic("Impossible data==NULL length !=0");
6015 #endif
6016 				out_flags |= MSG_EOR;
6017 				out_flags |= MSG_TRUNC;
6018 				control->length = 0;
6019 				SCTP_INP_READ_UNLOCK(inp);
6020 				goto done_with_control;
6021 			}
6022 			SCTP_INP_READ_UNLOCK(inp);
6023 			/* We will fall around to get more data */
6024 		}
6025 		goto get_more_data;
6026 	} else {
6027 		/*-
6028 		 * Give caller back the mbuf chain,
6029 		 * store in uio_resid the length
6030 		 */
6031 		wakeup_read_socket = 0;
6032 		if ((control->end_added == 0) ||
6033 		    (TAILQ_NEXT(control, next) == NULL)) {
6034 			/* Need to get rlock */
6035 			if (hold_rlock == 0) {
6036 				SCTP_INP_READ_LOCK(inp);
6037 				hold_rlock = 1;
6038 			}
6039 		}
6040 		if (control->end_added) {
6041 			out_flags |= MSG_EOR;
6042 			if ((control->do_not_ref_stcb == 0) &&
6043 			    (control->stcb != NULL) &&
6044 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6045 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6046 		}
6047 		if (control->spec_flags & M_NOTIFICATION) {
6048 			out_flags |= MSG_NOTIFICATION;
6049 		}
6050 		uio->uio_resid = control->length;
6051 		*mp = control->data;
6052 		m = control->data;
6053 		while (m) {
6054 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6055 				sctp_sblog(&so->so_rcv,
6056 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6057 			}
6058 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6059 			freed_so_far += SCTP_BUF_LEN(m);
6060 			freed_so_far += MSIZE;
6061 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6062 				sctp_sblog(&so->so_rcv,
6063 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6064 			}
6065 			m = SCTP_BUF_NEXT(m);
6066 		}
6067 		control->data = control->tail_mbuf = NULL;
6068 		control->length = 0;
6069 		if (out_flags & MSG_EOR) {
6070 			/* Done with this control */
6071 			goto done_with_control;
6072 		}
6073 	}
6074 release:
6075 	if (hold_rlock == 1) {
6076 		SCTP_INP_READ_UNLOCK(inp);
6077 		hold_rlock = 0;
6078 	}
6079 	if (hold_sblock == 1) {
6080 		SOCKBUF_UNLOCK(&so->so_rcv);
6081 		hold_sblock = 0;
6082 	}
6083 	sbunlock(&so->so_rcv);
6084 	sockbuf_lock = 0;
6085 
6086 release_unlocked:
6087 	if (hold_sblock) {
6088 		SOCKBUF_UNLOCK(&so->so_rcv);
6089 		hold_sblock = 0;
6090 	}
6091 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6092 		if ((freed_so_far >= rwnd_req) &&
6093 		    (control && (control->do_not_ref_stcb == 0)) &&
6094 		    (no_rcv_needed == 0))
6095 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6096 	}
6097 out:
6098 	if (msg_flags) {
6099 		*msg_flags = out_flags;
6100 	}
6101 	if (((out_flags & MSG_EOR) == 0) &&
6102 	    ((in_flags & MSG_PEEK) == 0) &&
6103 	    (sinfo) &&
6104 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6105 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6106 		struct sctp_extrcvinfo *s_extra;
6107 
6108 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6109 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6110 	}
6111 	if (hold_rlock == 1) {
6112 		SCTP_INP_READ_UNLOCK(inp);
6113 	}
6114 	if (hold_sblock) {
6115 		SOCKBUF_UNLOCK(&so->so_rcv);
6116 	}
6117 	if (sockbuf_lock) {
6118 		sbunlock(&so->so_rcv);
6119 	}
6120 	if (freecnt_applied) {
6121 		/*
6122 		 * The lock on the socket buffer protects us so the free
6123 		 * code will stop. But since we used the socketbuf lock and
6124 		 * the sender uses the tcb_lock to increment, we need to use
6125 		 * the atomic add to the refcnt.
6126 		 */
6127 		if (stcb == NULL) {
6128 #ifdef INVARIANTS
6129 			panic("stcb for refcnt has gone NULL?");
6130 			goto stage_left;
6131 #else
6132 			goto stage_left;
6133 #endif
6134 		}
6135 		atomic_add_int(&stcb->asoc.refcnt, -1);
6136 		/* Save the value back for next time */
6137 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6138 	}
6139 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6140 		if (stcb) {
6141 			sctp_misc_ints(SCTP_SORECV_DONE,
6142 			    freed_so_far,
6143 			    ((uio) ? (slen - uio->uio_resid) : slen),
6144 			    stcb->asoc.my_rwnd,
6145 			    so->so_rcv.sb_cc);
6146 		} else {
6147 			sctp_misc_ints(SCTP_SORECV_DONE,
6148 			    freed_so_far,
6149 			    ((uio) ? (slen - uio->uio_resid) : slen),
6150 			    0,
6151 			    so->so_rcv.sb_cc);
6152 		}
6153 	}
6154 stage_left:
6155 	if (wakeup_read_socket) {
6156 		sctp_sorwakeup(inp, so);
6157 	}
6158 	return (error);
6159 }
6160 
6161 
6162 #ifdef SCTP_MBUF_LOGGING
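/*
 * Wrappers used when SCTP_MBUF_LOGGING is compiled in: optionally log each
 * mbuf free (subject to the mbuf-logging sysctl bit) before handing the
 * mbuf (chain) to m_free()/m_freem().
 */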
6163 struct mbuf *
6164 sctp_m_free(struct mbuf *m)
6165 {
6166 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6167 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6168 	}
6169 	return (m_free(m));
6170 }
6171 
6172 void
6173 sctp_m_freem(struct mbuf *mb)
6174 {
6175 	while (mb != NULL)
6176 		mb = sctp_m_free(mb);
6177 }
6178 
6179 #endif
6180 
6181 int
6182 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6183 {
6184 	/*
6185 	 * Given a local address. For all associations that hold the
6186 	 * address, request a peer-set-primary.
6187 	 */
6188 	struct sctp_ifa *ifa;
6189 	struct sctp_laddr *wi;
6190 
6191 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6192 	if (ifa == NULL) {
6193 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6194 		return (EADDRNOTAVAIL);
6195 	}
6196 	/*
6197 	 * Now that we have the ifa we must awaken the iterator with this
6198 	 * message.
6199 	 */
6200 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6201 	if (wi == NULL) {
6202 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6203 		return (ENOMEM);
6204 	}
6205 	/* Now incr the count and init the wi structure */
6206 	SCTP_INCR_LADDR_COUNT();
6207 	bzero(wi, sizeof(*wi));
6208 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6209 	wi->ifa = ifa;
6210 	wi->action = SCTP_SET_PRIM_ADDR;
6211 	atomic_add_int(&ifa->refcount, 1);
6212 
6213 	/* Now add it to the work queue */
6214 	SCTP_WQ_ADDR_LOCK();
6215 	/*
6216 	 * Should this really be a tailq? As it is we will process the
6217 	 * newest first :-0
6218 	 */
6219 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6220 	SCTP_WQ_ADDR_UNLOCK();
6221 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6222 	    (struct sctp_inpcb *)NULL,
6223 	    (struct sctp_tcb *)NULL,
6224 	    (struct sctp_nets *)NULL);
6225 	return (0);
6226 }
6227 
6228 
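/*
 * Receive handler used by the socket layer for SCTP sockets.  It wraps
 * sctp_sorecvmsg() and, when requested, converts the returned
 * sctp_sndrcvinfo into a control-message chain and duplicates the sender's
 * address for the caller.
 */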
6229 int
6230 sctp_soreceive(struct socket *so,
6231     struct sockaddr **psa,
6232     struct uio *uio,
6233     struct mbuf **mp0,
6234     struct mbuf **controlp,
6235     int *flagsp)
6236 {
6237 	int error, fromlen;
6238 	uint8_t sockbuf[256];
6239 	struct sockaddr *from;
6240 	struct sctp_extrcvinfo sinfo;
6241 	int filling_sinfo = 1;
6242 	struct sctp_inpcb *inp;
6243 
6244 	inp = (struct sctp_inpcb *)so->so_pcb;
6245 	/* pickup the assoc we are reading from */
6246 	if (inp == NULL) {
6247 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6248 		return (EINVAL);
6249 	}
6250 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6251 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6252 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6253 	    (controlp == NULL)) {
6254 		/* user does not want the sndrcv ctl */
6255 		filling_sinfo = 0;
6256 	}
6257 	if (psa) {
6258 		from = (struct sockaddr *)sockbuf;
6259 		fromlen = sizeof(sockbuf);
6260 		from->sa_len = 0;
6261 	} else {
6262 		from = NULL;
6263 		fromlen = 0;
6264 	}
6265 
6266 	if (filling_sinfo) {
6267 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6268 	}
6269 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6270 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6271 	if (controlp != NULL) {
6272 		/* copy back the sinfo in a CMSG format */
6273 		if (filling_sinfo)
6274 			*controlp = sctp_build_ctl_nchunk(inp,
6275 			    (struct sctp_sndrcvinfo *)&sinfo);
6276 		else
6277 			*controlp = NULL;
6278 	}
6279 	if (psa) {
6280 		/* copy back the address info */
6281 		if (from && from->sa_len) {
6282 			*psa = sodupsockaddr(from, M_NOWAIT);
6283 		} else {
6284 			*psa = NULL;
6285 		}
6286 	}
6287 	return (error);
6288 }
6289 
6290 
6291 
6292 
6293 
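/*
 * Add each address in the packed sockaddr list to the given association as
 * a confirmed remote address.  On an invalid address or an allocation
 * failure the association is freed and *error is set; the number of
 * addresses actually added is returned.
 */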
6294 int
6295 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6296     int totaddr, int *error)
6297 {
6298 	int added = 0;
6299 	int i;
6300 	struct sctp_inpcb *inp;
6301 	struct sockaddr *sa;
6302 	size_t incr = 0;
6303 
6304 #ifdef INET
6305 	struct sockaddr_in *sin;
6306 
6307 #endif
6308 #ifdef INET6
6309 	struct sockaddr_in6 *sin6;
6310 
6311 #endif
6312 
6313 	sa = addr;
6314 	inp = stcb->sctp_ep;
6315 	*error = 0;
6316 	for (i = 0; i < totaddr; i++) {
6317 		switch (sa->sa_family) {
6318 #ifdef INET
6319 		case AF_INET:
6320 			incr = sizeof(struct sockaddr_in);
6321 			sin = (struct sockaddr_in *)sa;
6322 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6323 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6324 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6325 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6326 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
6327 				*error = EINVAL;
6328 				goto out_now;
6329 			}
6330 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6331 				/* assoc gone no un-lock */
6332 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6333 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
6334 				*error = ENOBUFS;
6335 				goto out_now;
6336 			}
6337 			added++;
6338 			break;
6339 #endif
6340 #ifdef INET6
6341 		case AF_INET6:
6342 			incr = sizeof(struct sockaddr_in6);
6343 			sin6 = (struct sockaddr_in6 *)sa;
6344 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6345 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6346 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6347 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
6348 				*error = EINVAL;
6349 				goto out_now;
6350 			}
6351 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6352 				/* assoc gone no un-lock */
6353 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6354 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
6355 				*error = ENOBUFS;
6356 				goto out_now;
6357 			}
6358 			added++;
6359 			break;
6360 #endif
6361 		default:
6362 			break;
6363 		}
6364 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6365 	}
6366 out_now:
6367 	return (added);
6368 }
6369 
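/*
 * Walk the packed sockaddr list handed to sctp_connectx(), validating the
 * length of each entry and counting the IPv4 and IPv6 addresses.  If any
 * address already belongs to an association on this endpoint, that
 * association is returned; otherwise NULL.
 */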
6370 struct sctp_tcb *
6371 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6372     int *totaddr, int *num_v4, int *num_v6, int *error,
6373     int limit, int *bad_addr)
6374 {
6375 	struct sockaddr *sa;
6376 	struct sctp_tcb *stcb = NULL;
6377 	size_t incr, at, i;
6378 
6379 	at = incr = 0;
6380 	sa = addr;
6381 
6382 	*error = *num_v6 = *num_v4 = 0;
6383 	/* account and validate addresses */
6384 	for (i = 0; i < (size_t)*totaddr; i++) {
6385 		switch (sa->sa_family) {
6386 #ifdef INET
6387 		case AF_INET:
6388 			(*num_v4) += 1;
6389 			incr = sizeof(struct sockaddr_in);
6390 			if (sa->sa_len != incr) {
6391 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6392 				*error = EINVAL;
6393 				*bad_addr = 1;
6394 				return (NULL);
6395 			}
6396 			break;
6397 #endif
6398 #ifdef INET6
6399 		case AF_INET6:
6400 			{
6401 				struct sockaddr_in6 *sin6;
6402 
6403 				sin6 = (struct sockaddr_in6 *)sa;
6404 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6405 					/* Must be non-mapped for connectx */
6406 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6407 					*error = EINVAL;
6408 					*bad_addr = 1;
6409 					return (NULL);
6410 				}
6411 				(*num_v6) += 1;
6412 				incr = sizeof(struct sockaddr_in6);
6413 				if (sa->sa_len != incr) {
6414 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6415 					*error = EINVAL;
6416 					*bad_addr = 1;
6417 					return (NULL);
6418 				}
6419 				break;
6420 			}
6421 #endif
6422 		default:
6423 			*totaddr = i;
6424 			/* we are done */
6425 			break;
6426 		}
6427 		if (i == (size_t)*totaddr) {
6428 			break;
6429 		}
6430 		SCTP_INP_INCR_REF(inp);
6431 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6432 		if (stcb != NULL) {
6433 			/* Already have or am bringing up an association */
6434 			return (stcb);
6435 		} else {
6436 			SCTP_INP_DECR_REF(inp);
6437 		}
6438 		if ((at + incr) > (size_t)limit) {
6439 			*totaddr = i;
6440 			break;
6441 		}
6442 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6443 	}
6444 	return ((struct sctp_tcb *)NULL);
6445 }
6446 
6447 /*
6448  * sctp_bindx(ADD) for one address.
6449  * assumes all arguments are valid/checked by caller.
6450  */
6451 void
6452 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6453     struct sockaddr *sa, sctp_assoc_t assoc_id,
6454     uint32_t vrf_id, int *error, void *p)
6455 {
6456 	struct sockaddr *addr_touse;
6457 
6458 #if defined(INET) && defined(INET6)
6459 	struct sockaddr_in sin;
6460 
6461 #endif
6462 
6463 	/* see if we're bound all already! */
6464 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6465 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6466 		*error = EINVAL;
6467 		return;
6468 	}
6469 	addr_touse = sa;
6470 #ifdef INET6
6471 	if (sa->sa_family == AF_INET6) {
6472 #ifdef INET
6473 		struct sockaddr_in6 *sin6;
6474 
6475 #endif
6476 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6477 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6478 			*error = EINVAL;
6479 			return;
6480 		}
6481 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6482 			/* can only bind v6 on PF_INET6 sockets */
6483 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6484 			*error = EINVAL;
6485 			return;
6486 		}
6487 #ifdef INET
6488 		sin6 = (struct sockaddr_in6 *)addr_touse;
6489 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6490 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6491 			    SCTP_IPV6_V6ONLY(inp)) {
6492 				/* can't bind v4-mapped on PF_INET sockets */
6493 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6494 				*error = EINVAL;
6495 				return;
6496 			}
6497 			in6_sin6_2_sin(&sin, sin6);
6498 			addr_touse = (struct sockaddr *)&sin;
6499 		}
6500 #endif
6501 	}
6502 #endif
6503 #ifdef INET
6504 	if (sa->sa_family == AF_INET) {
6505 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6506 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6507 			*error = EINVAL;
6508 			return;
6509 		}
6510 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6511 		    SCTP_IPV6_V6ONLY(inp)) {
6512 			/* can't bind v4 on PF_INET sockets */
6513 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6514 			*error = EINVAL;
6515 			return;
6516 		}
6517 	}
6518 #endif
6519 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6520 		if (p == NULL) {
6521 			/* Can't get proc for Net/Open BSD */
6522 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6523 			*error = EINVAL;
6524 			return;
6525 		}
6526 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6527 		return;
6528 	}
6529 	/*
6530 	 * No locks required here since bind and mgmt_ep_sa all do their own
6531 	 * locking. If we do something for the FIX: below we may need to
6532 	 * lock in that case.
6533 	 */
6534 	if (assoc_id == 0) {
6535 		/* add the address */
6536 		struct sctp_inpcb *lep;
6537 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6538 
6539 		/* validate the incoming port */
6540 		if ((lsin->sin_port != 0) &&
6541 		    (lsin->sin_port != inp->sctp_lport)) {
6542 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6543 			*error = EINVAL;
6544 			return;
6545 		} else {
6546 			/* user specified 0 port, set it to existing port */
6547 			lsin->sin_port = inp->sctp_lport;
6548 		}
6549 
6550 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6551 		if (lep != NULL) {
6552 			/*
6553 			 * We must decrement the refcount since we have the
6554 			 * ep already and are binding. No remove going on
6555 			 * here.
6556 			 */
6557 			SCTP_INP_DECR_REF(lep);
6558 		}
6559 		if (lep == inp) {
6560 			/* already bound to it.. ok */
6561 			return;
6562 		} else if (lep == NULL) {
6563 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6564 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6565 			    SCTP_ADD_IP_ADDRESS,
6566 			    vrf_id, NULL);
6567 		} else {
6568 			*error = EADDRINUSE;
6569 		}
6570 		if (*error)
6571 			return;
6572 	} else {
6573 		/*
6574 		 * FIX: decide whether we allow assoc based bindx
6575 		 */
6576 	}
6577 }
6578 
6579 /*
6580  * sctp_bindx(DELETE) for one address.
6581  * assumes all arguments are valid/checked by caller.
6582  */
6583 void
6584 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6585     struct sockaddr *sa, sctp_assoc_t assoc_id,
6586     uint32_t vrf_id, int *error)
6587 {
6588 	struct sockaddr *addr_touse;
6589 
6590 #if defined(INET) && defined(INET6)
6591 	struct sockaddr_in sin;
6592 
6593 #endif
6594 
6595 	/* see if we're bound all already! */
6596 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6597 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6598 		*error = EINVAL;
6599 		return;
6600 	}
6601 	addr_touse = sa;
6602 #ifdef INET6
6603 	if (sa->sa_family == AF_INET6) {
6604 #ifdef INET
6605 		struct sockaddr_in6 *sin6;
6606 
6607 #endif
6608 
6609 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6610 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6611 			*error = EINVAL;
6612 			return;
6613 		}
6614 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6615 			/* can only bind v6 on PF_INET6 sockets */
6616 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6617 			*error = EINVAL;
6618 			return;
6619 		}
6620 #ifdef INET
6621 		sin6 = (struct sockaddr_in6 *)addr_touse;
6622 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6623 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6624 			    SCTP_IPV6_V6ONLY(inp)) {
6625 				/* can't bind mapped-v4 on PF_INET sockets */
6626 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6627 				*error = EINVAL;
6628 				return;
6629 			}
6630 			in6_sin6_2_sin(&sin, sin6);
6631 			addr_touse = (struct sockaddr *)&sin;
6632 		}
6633 #endif
6634 	}
6635 #endif
6636 #ifdef INET
6637 	if (sa->sa_family == AF_INET) {
6638 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6639 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6640 			*error = EINVAL;
6641 			return;
6642 		}
6643 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6644 		    SCTP_IPV6_V6ONLY(inp)) {
6645 			/* can't bind v4 on PF_INET sockets */
6646 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6647 			*error = EINVAL;
6648 			return;
6649 		}
6650 	}
6651 #endif
6652 	/*
6653 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6654 	 * below is ever changed we may need to lock before calling
6655 	 * association level binding.
6656 	 */
6657 	if (assoc_id == 0) {
6658 		/* delete the address */
6659 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6660 		    SCTP_DEL_IP_ADDRESS,
6661 		    vrf_id, NULL);
6662 	} else {
6663 		/*
6664 		 * FIX: decide whether we allow assoc based bindx
6665 		 */
6666 	}
6667 }
6668 
6669 /*
6670  * returns the valid local address count for an assoc, taking into account
6671  * all scoping rules
6672  */
6673 int
6674 sctp_local_addr_count(struct sctp_tcb *stcb)
6675 {
6676 	int loopback_scope;
6677 
6678 #if defined(INET)
6679 	int ipv4_local_scope, ipv4_addr_legal;
6680 
6681 #endif
6682 #if defined (INET6)
6683 	int local_scope, site_scope, ipv6_addr_legal;
6684 
6685 #endif
6686 	struct sctp_vrf *vrf;
6687 	struct sctp_ifn *sctp_ifn;
6688 	struct sctp_ifa *sctp_ifa;
6689 	int count = 0;
6690 
6691 	/* Turn on all the appropriate scopes */
6692 	loopback_scope = stcb->asoc.scope.loopback_scope;
6693 #if defined(INET)
6694 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6695 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6696 #endif
6697 #if defined(INET6)
6698 	local_scope = stcb->asoc.scope.local_scope;
6699 	site_scope = stcb->asoc.scope.site_scope;
6700 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6701 #endif
6702 	SCTP_IPI_ADDR_RLOCK();
6703 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6704 	if (vrf == NULL) {
6705 		/* no vrf, no addresses */
6706 		SCTP_IPI_ADDR_RUNLOCK();
6707 		return (0);
6708 	}
6709 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6710 		/*
6711 		 * bound all case: go through all ifns on the vrf
6712 		 */
6713 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6714 			if ((loopback_scope == 0) &&
6715 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6716 				continue;
6717 			}
6718 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6719 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6720 					continue;
6721 				switch (sctp_ifa->address.sa.sa_family) {
6722 #ifdef INET
6723 				case AF_INET:
6724 					if (ipv4_addr_legal) {
6725 						struct sockaddr_in *sin;
6726 
6727 						sin = &sctp_ifa->address.sin;
6728 						if (sin->sin_addr.s_addr == 0) {
6729 							/*
6730 							 * skip unspecified
6731 							 * addrs
6732 							 */
6733 							continue;
6734 						}
6735 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6736 						    &sin->sin_addr) != 0) {
6737 							continue;
6738 						}
6739 						if ((ipv4_local_scope == 0) &&
6740 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6741 							continue;
6742 						}
6743 						/* count this one */
6744 						count++;
6745 					} else {
6746 						continue;
6747 					}
6748 					break;
6749 #endif
6750 #ifdef INET6
6751 				case AF_INET6:
6752 					if (ipv6_addr_legal) {
6753 						struct sockaddr_in6 *sin6;
6754 
6755 						sin6 = &sctp_ifa->address.sin6;
6756 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6757 							continue;
6758 						}
6759 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6760 						    &sin6->sin6_addr) != 0) {
6761 							continue;
6762 						}
6763 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6764 							if (local_scope == 0)
6765 								continue;
6766 							if (sin6->sin6_scope_id == 0) {
6767 								if (sa6_recoverscope(sin6) != 0)
6768 									/*
6769 									/* bad link local address */
6782 									continue;
6783 							}
6784 						}
6785 						if ((site_scope == 0) &&
6786 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6787 							continue;
6788 						}
6789 						/* count this one */
6790 						count++;
6791 					}
6792 					break;
6793 #endif
6794 				default:
6795 					/* TSNH */
6796 					break;
6797 				}
6798 			}
6799 		}
6800 	} else {
6801 		/*
6802 		 * subset bound case
6803 		 */
6804 		struct sctp_laddr *laddr;
6805 
6806 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6807 		    sctp_nxt_addr) {
6808 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6809 				continue;
6810 			}
6811 			/* count this one */
6812 			count++;
6813 		}
6814 	}
6815 	SCTP_IPI_ADDR_RUNLOCK();
6816 	return (count);
6817 }
6818 
6819 #if defined(SCTP_LOCAL_TRACE_BUF)
6820 
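/*
 * Append one entry to the circular in-kernel SCTP trace log, claiming a
 * slot with an atomic compare-and-swap so that concurrent writers do not
 * reuse the same index.
 */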
6821 void
6822 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6823 {
6824 	uint32_t saveindex, newindex;
6825 
6826 	do {
6827 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6828 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6829 			newindex = 1;
6830 		} else {
6831 			newindex = saveindex + 1;
6832 		}
6833 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6834 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6835 		saveindex = 0;
6836 	}
6837 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6838 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6839 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6840 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6841 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6842 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6843 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6844 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6845 }
6846 
6847 #endif
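
/*
 * Receive hook installed on the kernel UDP tunneling sockets.  Each
 * UDP-encapsulated SCTP packet (see RFC 6951) lands here: record the
 * source UDP port, strip the UDP header and hand the packet to the normal
 * IPv4/IPv6 SCTP input path.
 */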
6848 static void
6849 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored,
6850     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6851 {
6852 	struct ip *iph;
6853 
6854 #ifdef INET6
6855 	struct ip6_hdr *ip6;
6856 
6857 #endif
6858 	struct mbuf *sp, *last;
6859 	struct udphdr *uhdr;
6860 	uint16_t port;
6861 
6862 	if ((m->m_flags & M_PKTHDR) == 0) {
6863 		/* Can't handle one that is not a pkt hdr */
6864 		goto out;
6865 	}
6866 	/* Pull the src port */
6867 	iph = mtod(m, struct ip *);
6868 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6869 	port = uhdr->uh_sport;
6870 	/*
6871 	 * Split out the mbuf chain. Leave the IP header in m, place the
6872 	 * rest in the sp.
6873 	 */
6874 	sp = m_split(m, off, M_NOWAIT);
6875 	if (sp == NULL) {
6876 		/* Gak, drop packet, we can't do a split */
6877 		goto out;
6878 	}
6879 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6880 		/* Gak, packet can't have an SCTP header in it - too small */
6881 		m_freem(sp);
6882 		goto out;
6883 	}
6884 	/* Now pull up the UDP header and SCTP header together */
6885 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6886 	if (sp == NULL) {
6887 		/* Gak pullup failed */
6888 		goto out;
6889 	}
6890 	/* Trim out the UDP header */
6891 	m_adj(sp, sizeof(struct udphdr));
6892 
6893 	/* Now reconstruct the mbuf chain */
6894 	for (last = m; last->m_next; last = last->m_next);
6895 	last->m_next = sp;
6896 	m->m_pkthdr.len += sp->m_pkthdr.len;
6897 	iph = mtod(m, struct ip *);
6898 	switch (iph->ip_v) {
6899 #ifdef INET
6900 	case IPVERSION:
6901 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6902 		sctp_input_with_port(m, off, port);
6903 		break;
6904 #endif
6905 #ifdef INET6
6906 	case IPV6_VERSION >> 4:
6907 		ip6 = mtod(m, struct ip6_hdr *);
6908 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6909 		sctp6_input_with_port(&m, &off, port);
6910 		break;
6911 #endif
6912 	default:
6913 		goto out;
6914 		break;
6915 	}
6916 	return;
6917 out:
6918 	m_freem(m);
6919 }
6920 
6921 void
6922 sctp_over_udp_stop(void)
6923 {
6924 	/*
6925 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6926 	 * for writing!
6927 	 */
6928 #ifdef INET
6929 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
6930 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
6931 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
6932 	}
6933 #endif
6934 #ifdef INET6
6935 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
6936 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
6937 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
6938 	}
6939 #endif
6940 }
6941 
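/*
 * Create the kernel UDP tunneling sockets (IPv4 and/or IPv6 as compiled),
 * register sctp_recv_udp_tunneled_packet() as their receive hook and bind
 * them to the sysctl-configured sctp_udp_tunneling_port.
 */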
6942 int
6943 sctp_over_udp_start(void)
6944 {
6945 	uint16_t port;
6946 	int ret;
6947 
6948 #ifdef INET
6949 	struct sockaddr_in sin;
6950 
6951 #endif
6952 #ifdef INET6
6953 	struct sockaddr_in6 sin6;
6954 
6955 #endif
6956 	/*
6957 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6958 	 * for writing!
6959 	 */
6960 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
6961 	if (ntohs(port) == 0) {
6962 		/* Must have a port set */
6963 		return (EINVAL);
6964 	}
6965 #ifdef INET
6966 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
6967 		/* Already running -- must stop first */
6968 		return (EALREADY);
6969 	}
6970 #endif
6971 #ifdef INET6
6972 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
6973 		/* Already running -- must stop first */
6974 		return (EALREADY);
6975 	}
6976 #endif
6977 #ifdef INET
6978 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
6979 	    SOCK_DGRAM, IPPROTO_UDP,
6980 	    curthread->td_ucred, curthread))) {
6981 		sctp_over_udp_stop();
6982 		return (ret);
6983 	}
6984 	/* Call the special UDP hook. */
6985 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
6986 	    sctp_recv_udp_tunneled_packet, NULL))) {
6987 		sctp_over_udp_stop();
6988 		return (ret);
6989 	}
6990 	/* Ok, we have a socket, bind it to the port. */
6991 	memset(&sin, 0, sizeof(struct sockaddr_in));
6992 	sin.sin_len = sizeof(struct sockaddr_in);
6993 	sin.sin_family = AF_INET;
6994 	sin.sin_port = htons(port);
6995 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
6996 	    (struct sockaddr *)&sin, curthread))) {
6997 		sctp_over_udp_stop();
6998 		return (ret);
6999 	}
7000 #endif
7001 #ifdef INET6
7002 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7003 	    SOCK_DGRAM, IPPROTO_UDP,
7004 	    curthread->td_ucred, curthread))) {
7005 		sctp_over_udp_stop();
7006 		return (ret);
7007 	}
7008 	/* Call the special UDP hook. */
7009 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7010 	    sctp_recv_udp_tunneled_packet, NULL))) {
7011 		sctp_over_udp_stop();
7012 		return (ret);
7013 	}
7014 	/* Ok, we have a socket, bind it to the port. */
7015 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7016 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7017 	sin6.sin6_family = AF_INET6;
7018 	sin6.sin6_port = htons(port);
7019 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7020 	    (struct sockaddr *)&sin6, curthread))) {
7021 		sctp_over_udp_stop();
7022 		return (ret);
7023 	}
7024 #endif
7025 	return (0);
7026 }
7027