xref: /freebsd/sys/netinet/sctputil.c (revision 40a8ac8f62b535d30349faf28cf47106b7041b83)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55 
56 
57 #ifndef KTR_SCTP
58 #define KTR_SCTP KTR_SUBSYS
59 #endif
60 
61 extern struct sctp_cc_functions sctp_cc_functions[];
62 extern struct sctp_ss_functions sctp_ss_functions[];
63 
64 void
65 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
66 {
67 	struct sctp_cwnd_log sctp_clog;
68 
69 	sctp_clog.x.sb.stcb = stcb;
70 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
71 	if (stcb)
72 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
73 	else
74 		sctp_clog.x.sb.stcb_sbcc = 0;
75 	sctp_clog.x.sb.incr = incr;
76 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
77 	    SCTP_LOG_EVENT_SB,
78 	    from,
79 	    sctp_clog.x.misc.log1,
80 	    sctp_clog.x.misc.log2,
81 	    sctp_clog.x.misc.log3,
82 	    sctp_clog.x.misc.log4);
83 }
84 
85 void
86 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
87 {
88 	struct sctp_cwnd_log sctp_clog;
89 
90 	sctp_clog.x.close.inp = (void *)inp;
91 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
92 	if (stcb) {
93 		sctp_clog.x.close.stcb = (void *)stcb;
94 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
95 	} else {
96 		sctp_clog.x.close.stcb = 0;
97 		sctp_clog.x.close.state = 0;
98 	}
99 	sctp_clog.x.close.loc = loc;
100 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
101 	    SCTP_LOG_EVENT_CLOSE,
102 	    0,
103 	    sctp_clog.x.misc.log1,
104 	    sctp_clog.x.misc.log2,
105 	    sctp_clog.x.misc.log3,
106 	    sctp_clog.x.misc.log4);
107 }
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 }
125 
126 void
127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128 {
129 	struct sctp_cwnd_log sctp_clog;
130 
131 	sctp_clog.x.strlog.stcb = stcb;
132 	sctp_clog.x.strlog.n_tsn = tsn;
133 	sctp_clog.x.strlog.n_sseq = sseq;
134 	sctp_clog.x.strlog.e_tsn = 0;
135 	sctp_clog.x.strlog.e_sseq = 0;
136 	sctp_clog.x.strlog.strm = stream;
137 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138 	    SCTP_LOG_EVENT_STRM,
139 	    from,
140 	    sctp_clog.x.misc.log1,
141 	    sctp_clog.x.misc.log2,
142 	    sctp_clog.x.misc.log3,
143 	    sctp_clog.x.misc.log4);
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 void
166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167 {
168 	struct sctp_cwnd_log sctp_clog;
169 
170 	sctp_clog.x.sack.cumack = cumack;
171 	sctp_clog.x.sack.oldcumack = old_cumack;
172 	sctp_clog.x.sack.tsn = tsn;
173 	sctp_clog.x.sack.numGaps = gaps;
174 	sctp_clog.x.sack.numDups = dups;
175 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176 	    SCTP_LOG_EVENT_SACK,
177 	    from,
178 	    sctp_clog.x.misc.log1,
179 	    sctp_clog.x.misc.log2,
180 	    sctp_clog.x.misc.log3,
181 	    sctp_clog.x.misc.log4);
182 }
183 
184 void
185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186 {
187 	struct sctp_cwnd_log sctp_clog;
188 
189 	memset(&sctp_clog, 0, sizeof(sctp_clog));
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
204 {
205 	struct sctp_cwnd_log sctp_clog;
206 
207 	memset(&sctp_clog, 0, sizeof(sctp_clog));
208 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210 	sctp_clog.x.fr.tsn = tsn;
211 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212 	    SCTP_LOG_EVENT_FR,
213 	    from,
214 	    sctp_clog.x.misc.log1,
215 	    sctp_clog.x.misc.log2,
216 	    sctp_clog.x.misc.log3,
217 	    sctp_clog.x.misc.log4);
218 }
219 
220 void
221 sctp_log_mb(struct mbuf *m, int from)
222 {
223 	struct sctp_cwnd_log sctp_clog;
224 
225 	sctp_clog.x.mb.mp = m;
226 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
227 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
228 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
229 	if (SCTP_BUF_IS_EXTENDED(m)) {
230 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
231 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
232 	} else {
233 		sctp_clog.x.mb.ext = 0;
234 		sctp_clog.x.mb.refcnt = 0;
235 	}
236 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
237 	    SCTP_LOG_EVENT_MBUF,
238 	    from,
239 	    sctp_clog.x.misc.log1,
240 	    sctp_clog.x.misc.log2,
241 	    sctp_clog.x.misc.log3,
242 	    sctp_clog.x.misc.log4);
243 }
244 
245 void
246 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
247 {
248 	struct sctp_cwnd_log sctp_clog;
249 
250 	if (control == NULL) {
251 		SCTP_PRINTF("Gak log of NULL?\n");
252 		return;
253 	}
254 	sctp_clog.x.strlog.stcb = control->stcb;
255 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
256 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
257 	sctp_clog.x.strlog.strm = control->sinfo_stream;
258 	if (poschk != NULL) {
259 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
260 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
261 	} else {
262 		sctp_clog.x.strlog.e_tsn = 0;
263 		sctp_clog.x.strlog.e_sseq = 0;
264 	}
265 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
266 	    SCTP_LOG_EVENT_STRM,
267 	    from,
268 	    sctp_clog.x.misc.log1,
269 	    sctp_clog.x.misc.log2,
270 	    sctp_clog.x.misc.log3,
271 	    sctp_clog.x.misc.log4);
272 }
273 
274 void
275 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
276 {
277 	struct sctp_cwnd_log sctp_clog;
278 
279 	sctp_clog.x.cwnd.net = net;
280 	if (stcb->asoc.send_queue_cnt > 255)
281 		sctp_clog.x.cwnd.cnt_in_send = 255;
282 	else
283 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
284 	if (stcb->asoc.stream_queue_cnt > 255)
285 		sctp_clog.x.cwnd.cnt_in_str = 255;
286 	else
287 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
288 
289 	if (net) {
290 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
291 		sctp_clog.x.cwnd.inflight = net->flight_size;
292 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
293 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
294 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
295 	}
296 	if (SCTP_CWNDLOG_PRESEND == from) {
297 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
298 	}
299 	sctp_clog.x.cwnd.cwnd_augment = augment;
300 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
301 	    SCTP_LOG_EVENT_CWND,
302 	    from,
303 	    sctp_clog.x.misc.log1,
304 	    sctp_clog.x.misc.log2,
305 	    sctp_clog.x.misc.log3,
306 	    sctp_clog.x.misc.log4);
307 }
308 
/*
 * Snapshot the ownership state of the relevant SCTP and socket locks
 * for the given endpoint/association and emit it as a trace record.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	/* SCTP_LOCK_UNKNOWN marks locks we cannot inspect (NULL object). */
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	/* Global endpoint-info lock: record write ownership only. */
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx; this looks intentional (the socket lock
		 * aliases the receive-buffer lock) - confirm.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
352 
/*
 * Log a max-burst limiting event: the error/reason code, the burst
 * value applied, the net's flight size, and clamped queue depths.
 *
 * NOTE(review): net is dereferenced unconditionally here, unlike
 * sctp_log_cwnd() which NULL-checks it - callers must pass a valid
 * net; confirm no caller can pass NULL.
 */
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	/* Queue depths are clamped to fit the 8-bit log fields. */
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
379 
380 void
381 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
382 {
383 	struct sctp_cwnd_log sctp_clog;
384 
385 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
386 	sctp_clog.x.rwnd.send_size = snd_size;
387 	sctp_clog.x.rwnd.overhead = overhead;
388 	sctp_clog.x.rwnd.new_rwnd = 0;
389 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
390 	    SCTP_LOG_EVENT_RWND,
391 	    from,
392 	    sctp_clog.x.misc.log1,
393 	    sctp_clog.x.misc.log2,
394 	    sctp_clog.x.misc.log3,
395 	    sctp_clog.x.misc.log4);
396 }
397 
398 void
399 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
400 {
401 	struct sctp_cwnd_log sctp_clog;
402 
403 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
404 	sctp_clog.x.rwnd.send_size = flight_size;
405 	sctp_clog.x.rwnd.overhead = overhead;
406 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
407 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
408 	    SCTP_LOG_EVENT_RWND,
409 	    from,
410 	    sctp_clog.x.misc.log1,
411 	    sctp_clog.x.misc.log2,
412 	    sctp_clog.x.misc.log3,
413 	    sctp_clog.x.misc.log4);
414 }
415 
416 void
417 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
418 {
419 	struct sctp_cwnd_log sctp_clog;
420 
421 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
422 	sctp_clog.x.mbcnt.size_change = book;
423 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
424 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
425 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
426 	    SCTP_LOG_EVENT_MBCNT,
427 	    from,
428 	    sctp_clog.x.misc.log1,
429 	    sctp_clog.x.misc.log2,
430 	    sctp_clog.x.misc.log3,
431 	    sctp_clog.x.misc.log4);
432 }
433 
434 void
435 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
436 {
437 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
438 	    SCTP_LOG_MISC_EVENT,
439 	    from,
440 	    a, b, c, d);
441 }
442 
443 void
444 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
445 {
446 	struct sctp_cwnd_log sctp_clog;
447 
448 	sctp_clog.x.wake.stcb = (void *)stcb;
449 	sctp_clog.x.wake.wake_cnt = wake_cnt;
450 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
451 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
452 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
453 
454 	if (stcb->asoc.stream_queue_cnt < 0xff)
455 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
456 	else
457 		sctp_clog.x.wake.stream_qcnt = 0xff;
458 
459 	if (stcb->asoc.chunks_on_out_queue < 0xff)
460 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
461 	else
462 		sctp_clog.x.wake.chunks_on_oque = 0xff;
463 
464 	sctp_clog.x.wake.sctpflags = 0;
465 	/* set in the defered mode stuff */
466 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
467 		sctp_clog.x.wake.sctpflags |= 1;
468 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
469 		sctp_clog.x.wake.sctpflags |= 2;
470 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
471 		sctp_clog.x.wake.sctpflags |= 4;
472 	/* what about the sb */
473 	if (stcb->sctp_socket) {
474 		struct socket *so = stcb->sctp_socket;
475 
476 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
477 	} else {
478 		sctp_clog.x.wake.sbflags = 0xff;
479 	}
480 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
481 	    SCTP_LOG_EVENT_WAKE,
482 	    from,
483 	    sctp_clog.x.misc.log1,
484 	    sctp_clog.x.misc.log2,
485 	    sctp_clog.x.misc.log3,
486 	    sctp_clog.x.misc.log4);
487 }
488 
489 void
490 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
491 {
492 	struct sctp_cwnd_log sctp_clog;
493 
494 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
495 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
496 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
497 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
498 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
499 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
500 	sctp_clog.x.blk.sndlen = sendlen;
501 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
502 	    SCTP_LOG_EVENT_BLOCK,
503 	    from,
504 	    sctp_clog.x.misc.log1,
505 	    sctp_clog.x.misc.log2,
506 	    sctp_clog.x.misc.log3,
507 	    sctp_clog.x.misc.log4);
508 }
509 
/*
 * Stub for copying the statistics log out via a socket option; the
 * arguments are unused and the call always reports success.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
516 
517 #ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];	/* circular trace buffer of {event, detail} byte pairs */
static int sctp_audit_indx = 0;	/* next slot to write; wraps at SCTP_AUDIT_SIZE */
520 
521 static
522 void
523 sctp_print_audit_report(void)
524 {
525 	int i;
526 	int cnt;
527 
528 	cnt = 0;
529 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
530 		if ((sctp_audit_data[i][0] == 0xe0) &&
531 		    (sctp_audit_data[i][1] == 0x01)) {
532 			cnt = 0;
533 			SCTP_PRINTF("\n");
534 		} else if (sctp_audit_data[i][0] == 0xf0) {
535 			cnt = 0;
536 			SCTP_PRINTF("\n");
537 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
538 		    (sctp_audit_data[i][1] == 0x01)) {
539 			SCTP_PRINTF("\n");
540 			cnt = 0;
541 		}
542 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
543 		    (uint32_t) sctp_audit_data[i][1]);
544 		cnt++;
545 		if ((cnt % 14) == 0)
546 			SCTP_PRINTF("\n");
547 	}
548 	for (i = 0; i < sctp_audit_indx; i++) {
549 		if ((sctp_audit_data[i][0] == 0xe0) &&
550 		    (sctp_audit_data[i][1] == 0x01)) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if (sctp_audit_data[i][0] == 0xf0) {
554 			cnt = 0;
555 			SCTP_PRINTF("\n");
556 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
557 		    (sctp_audit_data[i][1] == 0x01)) {
558 			SCTP_PRINTF("\n");
559 			cnt = 0;
560 		}
561 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
562 		    (uint32_t) sctp_audit_data[i][1]);
563 		cnt++;
564 		if ((cnt % 14) == 0)
565 			SCTP_PRINTF("\n");
566 	}
567 	SCTP_PRINTF("\n");
568 }
569 
/*
 * Audit the association's retransmission and flight-size accounting
 * against the actual contents of the sent queue and the per-net
 * counters, record marker entries in the audit buffer, correct any
 * mismatching counters in place, and print a report if anything was
 * off.  (The net parameter is recorded nowhere here - presumably kept
 * for call-site symmetry; confirm.)
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA marks an audit entry; the second byte is the caller id. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	/* 0xAF/0x01: bailing out because there is no endpoint. */
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xAF/0x02: bailing out because there is no association. */
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the current retransmit count (low byte). */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Recount from the sent queue: chunks marked for resend vs.
	 * chunks still counted as in flight (sent < RESEND).
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	/* 0xAF/0xA1: retransmit-count mismatch; correct it. */
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		/* 0xA2: record the corrected retransmit count. */
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	/* 0xAF/0xA2: total_flight byte count mismatch; correct it. */
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	/* 0xAF/0xA5: in-flight chunk count mismatch; correct it. */
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: sum of per-net flight sizes vs. total_flight. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	/* 0xAF/0xA3: per-net flight sums disagree; rebuild them. */
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
699 
700 void
701 sctp_audit_log(uint8_t ev, uint8_t fd)
702 {
703 
704 	sctp_audit_data[sctp_audit_indx][0] = ev;
705 	sctp_audit_data[sctp_audit_indx][1] = fd;
706 	sctp_audit_indx++;
707 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
708 		sctp_audit_indx = 0;
709 	}
710 }
711 
712 #endif
713 
714 /*
715  * sctp_stop_timers_for_shutdown() should be called
716  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
717  * state to make sure that all timers are stopped.
718  */
719 void
720 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
721 {
722 	struct sctp_association *asoc;
723 	struct sctp_nets *net;
724 
725 	asoc = &stcb->asoc;
726 
727 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
728 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
729 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
730 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
731 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
732 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
733 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
734 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
735 	}
736 }
737 
738 /*
739  * a list of sizes based on typical mtu's, used only if next hop size not
740  * returned.
741  */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Return the largest table MTU strictly smaller than val.  Values at
 * or below the smallest table entry are returned unchanged; values
 * above the largest entry yield the largest entry.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	/* Scan downward for the first entry below val. */
	for (i = (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)) - 1; i > 0; i--) {
		if (sctp_mtu_sizes[i] < val) {
			return (sctp_mtu_sizes[i]);
		}
	}
	/* val lies between the two smallest entries. */
	return (sctp_mtu_sizes[0]);
}

/*
 * Return the smallest table MTU strictly larger than val.  If val is
 * at or above the largest entry, return val unchanged.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	uint32_t i, cnt;

	cnt = sizeof(sctp_mtu_sizes) / sizeof(uint32_t);
	for (i = 0; i < cnt; i++) {
		if (sctp_mtu_sizes[i] > val) {
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}
800 
/*
 * Refill the endpoint's random-number store and reset its read offset.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	/* HMAC(random_numbers, counter) -> random_store; bump counter. */
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
819 
820 uint32_t
821 sctp_select_initial_TSN(struct sctp_pcb *inp)
822 {
823 	/*
824 	 * A true implementation should use random selection process to get
825 	 * the initial stream sequence number, using RFC1750 as a good
826 	 * guideline
827 	 */
828 	uint32_t x, *xp;
829 	uint8_t *p;
830 	int store_at, new_store;
831 
832 	if (inp->initial_sequence_debug != 0) {
833 		uint32_t ret;
834 
835 		ret = inp->initial_sequence_debug;
836 		inp->initial_sequence_debug++;
837 		return (ret);
838 	}
839 retry:
840 	store_at = inp->store_at;
841 	new_store = store_at + sizeof(uint32_t);
842 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
843 		new_store = 0;
844 	}
845 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
846 		goto retry;
847 	}
848 	if (new_store == 0) {
849 		/* Refill the random store */
850 		sctp_fill_random_store(inp);
851 	}
852 	p = &inp->random_store[store_at];
853 	xp = (uint32_t *) p;
854 	x = *xp;
855 	return (x);
856 }
857 
858 uint32_t
859 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
860 {
861 	uint32_t x;
862 	struct timeval now;
863 
864 	if (check) {
865 		(void)SCTP_GETTIME_TIMEVAL(&now);
866 	}
867 	for (;;) {
868 		x = sctp_select_initial_TSN(&inp->sctp_ep);
869 		if (x == 0) {
870 			/* we never use 0 */
871 			continue;
872 		}
873 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
874 			break;
875 		}
876 	}
877 	return (x);
878 }
879 
880 int
881 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
882     uint32_t override_tag, uint32_t vrf_id)
883 {
884 	struct sctp_association *asoc;
885 
886 	/*
887 	 * Anything set to zero is taken care of by the allocation routine's
888 	 * bzero
889 	 */
890 
891 	/*
892 	 * Up front select what scoping to apply on addresses I tell my peer
893 	 * Not sure what to do with these right now, we will need to come up
894 	 * with a way to set them. We may need to pass them through from the
895 	 * caller in the sctp_aloc_assoc() function.
896 	 */
897 	int i;
898 
899 #if defined(SCTP_DETAILED_STR_STATS)
900 	int j;
901 
902 #endif
903 
904 	asoc = &stcb->asoc;
905 	/* init all variables to a known value. */
906 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
907 	asoc->max_burst = inp->sctp_ep.max_burst;
908 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
909 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
910 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
911 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
912 	asoc->ecn_supported = inp->ecn_supported;
913 	asoc->prsctp_supported = inp->prsctp_supported;
914 	asoc->auth_supported = inp->auth_supported;
915 	asoc->asconf_supported = inp->asconf_supported;
916 	asoc->reconfig_supported = inp->reconfig_supported;
917 	asoc->nrsack_supported = inp->nrsack_supported;
918 	asoc->pktdrop_supported = inp->pktdrop_supported;
919 	asoc->sctp_cmt_pf = (uint8_t) 0;
920 	asoc->sctp_frag_point = inp->sctp_frag_point;
921 	asoc->sctp_features = inp->sctp_features;
922 	asoc->default_dscp = inp->sctp_ep.default_dscp;
923 #ifdef INET6
924 	if (inp->sctp_ep.default_flowlabel) {
925 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
926 	} else {
927 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
928 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
929 			asoc->default_flowlabel &= 0x000fffff;
930 			asoc->default_flowlabel |= 0x80000000;
931 		} else {
932 			asoc->default_flowlabel = 0;
933 		}
934 	}
935 #endif
936 	asoc->sb_send_resv = 0;
937 	if (override_tag) {
938 		asoc->my_vtag = override_tag;
939 	} else {
940 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
941 	}
942 	/* Get the nonce tags */
943 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
944 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
945 	asoc->vrf_id = vrf_id;
946 
947 #ifdef SCTP_ASOCLOG_OF_TSNS
948 	asoc->tsn_in_at = 0;
949 	asoc->tsn_out_at = 0;
950 	asoc->tsn_in_wrapped = 0;
951 	asoc->tsn_out_wrapped = 0;
952 	asoc->cumack_log_at = 0;
953 	asoc->cumack_log_atsnt = 0;
954 #endif
955 #ifdef SCTP_FS_SPEC_LOG
956 	asoc->fs_index = 0;
957 #endif
958 	asoc->refcnt = 0;
959 	asoc->assoc_up_sent = 0;
960 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
961 	    sctp_select_initial_TSN(&inp->sctp_ep);
962 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
963 	/* we are optimisitic here */
964 	asoc->peer_supports_nat = 0;
965 	asoc->sent_queue_retran_cnt = 0;
966 
967 	/* for CMT */
968 	asoc->last_net_cmt_send_started = NULL;
969 
970 	/* This will need to be adjusted */
971 	asoc->last_acked_seq = asoc->init_seq_number - 1;
972 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
973 	asoc->asconf_seq_in = asoc->last_acked_seq;
974 
975 	/* here we are different, we hold the next one we expect */
976 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
977 
978 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
979 	asoc->initial_rto = inp->sctp_ep.initial_rto;
980 
981 	asoc->max_init_times = inp->sctp_ep.max_init_times;
982 	asoc->max_send_times = inp->sctp_ep.max_send_times;
983 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
984 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
985 	asoc->free_chunk_cnt = 0;
986 
987 	asoc->iam_blocking = 0;
988 	asoc->context = inp->sctp_context;
989 	asoc->local_strreset_support = inp->local_strreset_support;
990 	asoc->def_send = inp->def_send;
991 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
992 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
993 	asoc->pr_sctp_cnt = 0;
994 	asoc->total_output_queue_size = 0;
995 
996 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
997 		asoc->scope.ipv6_addr_legal = 1;
998 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
999 			asoc->scope.ipv4_addr_legal = 1;
1000 		} else {
1001 			asoc->scope.ipv4_addr_legal = 0;
1002 		}
1003 	} else {
1004 		asoc->scope.ipv6_addr_legal = 0;
1005 		asoc->scope.ipv4_addr_legal = 1;
1006 	}
1007 
1008 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1009 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1010 
1011 	asoc->smallest_mtu = inp->sctp_frag_point;
1012 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1013 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1014 
1015 	asoc->locked_on_sending = NULL;
1016 	asoc->stream_locked_on = 0;
1017 	asoc->ecn_echo_cnt_onq = 0;
1018 	asoc->stream_locked = 0;
1019 
1020 	asoc->send_sack = 1;
1021 
1022 	LIST_INIT(&asoc->sctp_restricted_addrs);
1023 
1024 	TAILQ_INIT(&asoc->nets);
1025 	TAILQ_INIT(&asoc->pending_reply_queue);
1026 	TAILQ_INIT(&asoc->asconf_ack_sent);
1027 	/* Setup to fill the hb random cache at first HB */
1028 	asoc->hb_random_idx = 4;
1029 
1030 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1031 
1032 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1033 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1034 
1035 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1036 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1037 
1038 	/*
1039 	 * Now the stream parameters, here we allocate space for all streams
1040 	 * that we request by default.
1041 	 */
1042 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1043 	    inp->sctp_ep.pre_open_stream_count;
1044 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1045 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1046 	    SCTP_M_STRMO);
1047 	if (asoc->strmout == NULL) {
1048 		/* big trouble no memory */
1049 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1050 		return (ENOMEM);
1051 	}
1052 	for (i = 0; i < asoc->streamoutcnt; i++) {
1053 		/*
1054 		 * inbound side must be set to 0xffff, also NOTE when we get
1055 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1056 		 * count (streamoutcnt) but first check if we sent to any of
1057 		 * the upper streams that were dropped (if some were). Those
1058 		 * that were dropped must be notified to the upper layer as
1059 		 * failed to send.
1060 		 */
1061 		asoc->strmout[i].next_sequence_send = 0x0;
1062 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1063 		asoc->strmout[i].chunks_on_queues = 0;
1064 #if defined(SCTP_DETAILED_STR_STATS)
1065 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1066 			asoc->strmout[i].abandoned_sent[j] = 0;
1067 			asoc->strmout[i].abandoned_unsent[j] = 0;
1068 		}
1069 #else
1070 		asoc->strmout[i].abandoned_sent[0] = 0;
1071 		asoc->strmout[i].abandoned_unsent[0] = 0;
1072 #endif
1073 		asoc->strmout[i].stream_no = i;
1074 		asoc->strmout[i].last_msg_incomplete = 0;
1075 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1076 	}
1077 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1078 
1079 	/* Now the mapping array */
1080 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1081 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1082 	    SCTP_M_MAP);
1083 	if (asoc->mapping_array == NULL) {
1084 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1085 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1086 		return (ENOMEM);
1087 	}
1088 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1089 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1090 	    SCTP_M_MAP);
1091 	if (asoc->nr_mapping_array == NULL) {
1092 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1093 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1094 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1095 		return (ENOMEM);
1096 	}
1097 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1098 
1099 	/* Now the init of the other outqueues */
1100 	TAILQ_INIT(&asoc->free_chunks);
1101 	TAILQ_INIT(&asoc->control_send_queue);
1102 	TAILQ_INIT(&asoc->asconf_send_queue);
1103 	TAILQ_INIT(&asoc->send_queue);
1104 	TAILQ_INIT(&asoc->sent_queue);
1105 	TAILQ_INIT(&asoc->reasmqueue);
1106 	TAILQ_INIT(&asoc->resetHead);
1107 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1108 	TAILQ_INIT(&asoc->asconf_queue);
1109 	/* authentication fields */
1110 	asoc->authinfo.random = NULL;
1111 	asoc->authinfo.active_keyid = 0;
1112 	asoc->authinfo.assoc_key = NULL;
1113 	asoc->authinfo.assoc_keyid = 0;
1114 	asoc->authinfo.recv_key = NULL;
1115 	asoc->authinfo.recv_keyid = 0;
1116 	LIST_INIT(&asoc->shared_keys);
1117 	asoc->marked_retrans = 0;
1118 	asoc->port = inp->sctp_ep.port;
1119 	asoc->timoinit = 0;
1120 	asoc->timodata = 0;
1121 	asoc->timosack = 0;
1122 	asoc->timoshutdown = 0;
1123 	asoc->timoheartbeat = 0;
1124 	asoc->timocookie = 0;
1125 	asoc->timoshutdownack = 0;
1126 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1127 	asoc->discontinuity_time = asoc->start_time;
1128 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1129 		asoc->abandoned_unsent[i] = 0;
1130 		asoc->abandoned_sent[i] = 0;
1131 	}
1132 	/*
1133 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1134 	 * freed later when the association is freed.
1135 	 */
1136 	return (0);
1137 }
1138 
1139 void
1140 sctp_print_mapping_array(struct sctp_association *asoc)
1141 {
1142 	unsigned int i, limit;
1143 
1144 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1145 	    asoc->mapping_array_size,
1146 	    asoc->mapping_array_base_tsn,
1147 	    asoc->cumulative_tsn,
1148 	    asoc->highest_tsn_inside_map,
1149 	    asoc->highest_tsn_inside_nr_map);
1150 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1151 		if (asoc->mapping_array[limit - 1] != 0) {
1152 			break;
1153 		}
1154 	}
1155 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1156 	for (i = 0; i < limit; i++) {
1157 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1158 	}
1159 	if (limit % 16)
1160 		SCTP_PRINTF("\n");
1161 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1162 		if (asoc->nr_mapping_array[limit - 1]) {
1163 			break;
1164 		}
1165 	}
1166 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1167 	for (i = 0; i < limit; i++) {
1168 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1169 	}
1170 	if (limit % 16)
1171 		SCTP_PRINTF("\n");
1172 }
1173 
1174 int
1175 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1176 {
1177 	/* mapping array needs to grow */
1178 	uint8_t *new_array1, *new_array2;
1179 	uint32_t new_size;
1180 
1181 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1182 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1183 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1184 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1185 		/* can't get more, forget it */
1186 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1187 		if (new_array1) {
1188 			SCTP_FREE(new_array1, SCTP_M_MAP);
1189 		}
1190 		if (new_array2) {
1191 			SCTP_FREE(new_array2, SCTP_M_MAP);
1192 		}
1193 		return (-1);
1194 	}
1195 	memset(new_array1, 0, new_size);
1196 	memset(new_array2, 0, new_size);
1197 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1198 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1199 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1200 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1201 	asoc->mapping_array = new_array1;
1202 	asoc->nr_mapping_array = new_array2;
1203 	asoc->mapping_array_size = new_size;
1204 	return (0);
1205 }
1206 
1207 
/*
 * Run one queued iterator over the endpoint (inp) list and, within each
 * matching endpoint, over its association (stcb) list, invoking the
 * caller-supplied callbacks:
 *
 *   function_inp     - called once per endpoint before its assocs; a
 *                      non-zero return skips that endpoint's assocs.
 *   function_assoc   - called per matching association (TCB locked).
 *   function_inp_end - called after the last assoc of an endpoint.
 *   function_atend   - called once when the whole iteration finishes.
 *
 * On completion the iterator structure itself is freed here.  The walk
 * periodically drops and re-acquires the INP-INFO/ITERATOR locks (every
 * SCTP_ITERATOR_MAX_AT_ONCE assocs) so other threads can make progress;
 * sctp_it_ctl.iterator_flags is checked after each such pause so the
 * iteration can be aborted (STOP_CUR_IT) or moved to the next endpoint
 * (STOP_CUR_INP) while it is parked.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/* Drop the reference taken when the iterator was queued. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		/*
		 * Common exit: unlock, run the completion callback, and
		 * free the iterator.  "it" must not be touched afterwards.
		 */
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* it->inp is already read-locked from above. */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* Unlock the old endpoint only after advancing to the next. */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		/* Per-endpoint callback runs at most once per endpoint. */
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Endpoint skipped or has no assocs: run the end callback. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Hold a refcount on the TCB and a ref on the inp
			 * so neither can go away while all locks are
			 * briefly released and re-acquired below.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-acquire locks and drop the temporary refs. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1355 
1356 void
1357 sctp_iterator_worker(void)
1358 {
1359 	struct sctp_iterator *it, *nit;
1360 
1361 	/* This function is called with the WQ lock in place */
1362 
1363 	sctp_it_ctl.iterator_running = 1;
1364 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1365 		sctp_it_ctl.cur_it = it;
1366 		/* now lets work on this one */
1367 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1368 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1369 		CURVNET_SET(it->vn);
1370 		sctp_iterator_work(it);
1371 		sctp_it_ctl.cur_it = NULL;
1372 		CURVNET_RESTORE();
1373 		SCTP_IPI_ITERATOR_WQ_LOCK();
1374 		/* sa_ignore FREED_MEMORY */
1375 	}
1376 	sctp_it_ctl.iterator_running = 0;
1377 	return;
1378 }
1379 
1380 
1381 static void
1382 sctp_handle_addr_wq(void)
1383 {
1384 	/* deal with the ADDR wq from the rtsock calls */
1385 	struct sctp_laddr *wi, *nwi;
1386 	struct sctp_asconf_iterator *asc;
1387 
1388 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1389 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1390 	if (asc == NULL) {
1391 		/* Try later, no memory */
1392 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1393 		    (struct sctp_inpcb *)NULL,
1394 		    (struct sctp_tcb *)NULL,
1395 		    (struct sctp_nets *)NULL);
1396 		return;
1397 	}
1398 	LIST_INIT(&asc->list_of_work);
1399 	asc->cnt = 0;
1400 
1401 	SCTP_WQ_ADDR_LOCK();
1402 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1403 		LIST_REMOVE(wi, sctp_nxt_addr);
1404 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1405 		asc->cnt++;
1406 	}
1407 	SCTP_WQ_ADDR_UNLOCK();
1408 
1409 	if (asc->cnt == 0) {
1410 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1411 	} else {
1412 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1413 		    sctp_asconf_iterator_stcb,
1414 		    NULL,	/* No ep end for boundall */
1415 		    SCTP_PCB_FLAGS_BOUNDALL,
1416 		    SCTP_PCB_ANY_FEATURES,
1417 		    SCTP_ASOC_ANY_STATE,
1418 		    (void *)asc, 0,
1419 		    sctp_asconf_iterator_end, NULL, 0);
1420 	}
1421 }
1422 
/*
 * Central callout handler for all SCTP timer types.  "t" is the
 * struct sctp_timer embedded in the endpoint/association/net that armed
 * it; the ep/tcb/net back-pointers stored in the timer identify the
 * objects the timeout applies to.
 *
 * The function first validates the timer (self pointer, type, required
 * objects still alive), takes a reference on the inp and the TCB lock so
 * the objects cannot disappear while the handler runs, then dispatches on
 * tmr->type.  tmr->stopped_from is breadcrumb state recording how far the
 * validation got (0xa001..0xa006) or which type was serviced.
 *
 * Cases that destroy their object (ASOCKILL frees the stcb, INPKILL frees
 * the inp) jump to out_no_decr after NULLing the pointer so the common
 * cleanup does not touch freed memory.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Every timer except ADDR_WQ needs a valid endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	/* Remember the type now; tmr may be gone by the final debug print. */
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * With the socket already gone, only timers that drive
		 * association/endpoint teardown are still allowed to run.
		 */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the TCB while we check that it is still alive. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped while we were getting here; bail out. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/*
		 * Take the TCB lock (held for the rest of the handler)
		 * and drop the temporary refcount taken above.
		 */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what timeout occurred */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/* Only re-arm if heartbeats are still enabled on this path. */
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/* COOKIE-ECHO retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* Periodic rotation of the endpoint's cookie secrets. */
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Shutdown took too long overall: abort the association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Association has lingered long enough: free it now. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1862 
1863 void
1864 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1865     struct sctp_nets *net)
1866 {
1867 	uint32_t to_ticks;
1868 	struct sctp_timer *tmr;
1869 
1870 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1871 		return;
1872 
1873 	tmr = NULL;
1874 	if (stcb) {
1875 		SCTP_TCB_LOCK_ASSERT(stcb);
1876 	}
1877 	switch (t_type) {
1878 	case SCTP_TIMER_TYPE_ZERO_COPY:
1879 		tmr = &inp->sctp_ep.zero_copy_timer;
1880 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1881 		break;
1882 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1883 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1884 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1885 		break;
1886 	case SCTP_TIMER_TYPE_ADDR_WQ:
1887 		/* Only 1 tick away :-) */
1888 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1889 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1890 		break;
1891 	case SCTP_TIMER_TYPE_SEND:
1892 		/* Here we use the RTO timer */
1893 		{
1894 			int rto_val;
1895 
1896 			if ((stcb == NULL) || (net == NULL)) {
1897 				return;
1898 			}
1899 			tmr = &net->rxt_timer;
1900 			if (net->RTO == 0) {
1901 				rto_val = stcb->asoc.initial_rto;
1902 			} else {
1903 				rto_val = net->RTO;
1904 			}
1905 			to_ticks = MSEC_TO_TICKS(rto_val);
1906 		}
1907 		break;
1908 	case SCTP_TIMER_TYPE_INIT:
1909 		/*
1910 		 * Here we use the INIT timer default usually about 1
1911 		 * minute.
1912 		 */
1913 		if ((stcb == NULL) || (net == NULL)) {
1914 			return;
1915 		}
1916 		tmr = &net->rxt_timer;
1917 		if (net->RTO == 0) {
1918 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1919 		} else {
1920 			to_ticks = MSEC_TO_TICKS(net->RTO);
1921 		}
1922 		break;
1923 	case SCTP_TIMER_TYPE_RECV:
1924 		/*
1925 		 * Here we use the Delayed-Ack timer value from the inp
1926 		 * ususually about 200ms.
1927 		 */
1928 		if (stcb == NULL) {
1929 			return;
1930 		}
1931 		tmr = &stcb->asoc.dack_timer;
1932 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1933 		break;
1934 	case SCTP_TIMER_TYPE_SHUTDOWN:
1935 		/* Here we use the RTO of the destination. */
1936 		if ((stcb == NULL) || (net == NULL)) {
1937 			return;
1938 		}
1939 		if (net->RTO == 0) {
1940 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1941 		} else {
1942 			to_ticks = MSEC_TO_TICKS(net->RTO);
1943 		}
1944 		tmr = &net->rxt_timer;
1945 		break;
1946 	case SCTP_TIMER_TYPE_HEARTBEAT:
1947 		/*
1948 		 * the net is used here so that we can add in the RTO. Even
1949 		 * though we use a different timer. We also add the HB timer
1950 		 * PLUS a random jitter.
1951 		 */
1952 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
1953 			return;
1954 		} else {
1955 			uint32_t rndval;
1956 			uint32_t jitter;
1957 
1958 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1959 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1960 				return;
1961 			}
1962 			if (net->RTO == 0) {
1963 				to_ticks = stcb->asoc.initial_rto;
1964 			} else {
1965 				to_ticks = net->RTO;
1966 			}
1967 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1968 			jitter = rndval % to_ticks;
1969 			if (jitter >= (to_ticks >> 1)) {
1970 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1971 			} else {
1972 				to_ticks = to_ticks - jitter;
1973 			}
1974 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1975 			    !(net->dest_state & SCTP_ADDR_PF)) {
1976 				to_ticks += net->heart_beat_delay;
1977 			}
1978 			/*
1979 			 * Now we must convert the to_ticks that are now in
1980 			 * ms to ticks.
1981 			 */
1982 			to_ticks = MSEC_TO_TICKS(to_ticks);
1983 			tmr = &net->hb_timer;
1984 		}
1985 		break;
1986 	case SCTP_TIMER_TYPE_COOKIE:
1987 		/*
1988 		 * Here we can use the RTO timer from the network since one
1989 		 * RTT was compelete. If a retran happened then we will be
1990 		 * using the RTO initial value.
1991 		 */
1992 		if ((stcb == NULL) || (net == NULL)) {
1993 			return;
1994 		}
1995 		if (net->RTO == 0) {
1996 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1997 		} else {
1998 			to_ticks = MSEC_TO_TICKS(net->RTO);
1999 		}
2000 		tmr = &net->rxt_timer;
2001 		break;
2002 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2003 		/*
2004 		 * nothing needed but the endpoint here ususually about 60
2005 		 * minutes.
2006 		 */
2007 		if (inp == NULL) {
2008 			return;
2009 		}
2010 		tmr = &inp->sctp_ep.signature_change;
2011 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2012 		break;
2013 	case SCTP_TIMER_TYPE_ASOCKILL:
2014 		if (stcb == NULL) {
2015 			return;
2016 		}
2017 		tmr = &stcb->asoc.strreset_timer;
2018 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2019 		break;
2020 	case SCTP_TIMER_TYPE_INPKILL:
2021 		/*
2022 		 * The inp is setup to die. We re-use the signature_chage
2023 		 * timer since that has stopped and we are in the GONE
2024 		 * state.
2025 		 */
2026 		if (inp == NULL) {
2027 			return;
2028 		}
2029 		tmr = &inp->sctp_ep.signature_change;
2030 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2031 		break;
2032 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2033 		/*
2034 		 * Here we use the value found in the EP for PMTU ususually
2035 		 * about 10 minutes.
2036 		 */
2037 		if ((stcb == NULL) || (inp == NULL)) {
2038 			return;
2039 		}
2040 		if (net == NULL) {
2041 			return;
2042 		}
2043 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2044 			return;
2045 		}
2046 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2047 		tmr = &net->pmtu_timer;
2048 		break;
2049 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2050 		/* Here we use the RTO of the destination */
2051 		if ((stcb == NULL) || (net == NULL)) {
2052 			return;
2053 		}
2054 		if (net->RTO == 0) {
2055 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2056 		} else {
2057 			to_ticks = MSEC_TO_TICKS(net->RTO);
2058 		}
2059 		tmr = &net->rxt_timer;
2060 		break;
2061 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2062 		/*
2063 		 * Here we use the endpoints shutdown guard timer usually
2064 		 * about 3 minutes.
2065 		 */
2066 		if ((inp == NULL) || (stcb == NULL)) {
2067 			return;
2068 		}
2069 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2070 		tmr = &stcb->asoc.shut_guard_timer;
2071 		break;
2072 	case SCTP_TIMER_TYPE_STRRESET:
2073 		/*
2074 		 * Here the timer comes from the stcb but its value is from
2075 		 * the net's RTO.
2076 		 */
2077 		if ((stcb == NULL) || (net == NULL)) {
2078 			return;
2079 		}
2080 		if (net->RTO == 0) {
2081 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2082 		} else {
2083 			to_ticks = MSEC_TO_TICKS(net->RTO);
2084 		}
2085 		tmr = &stcb->asoc.strreset_timer;
2086 		break;
2087 	case SCTP_TIMER_TYPE_ASCONF:
2088 		/*
2089 		 * Here the timer comes from the stcb but its value is from
2090 		 * the net's RTO.
2091 		 */
2092 		if ((stcb == NULL) || (net == NULL)) {
2093 			return;
2094 		}
2095 		if (net->RTO == 0) {
2096 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2097 		} else {
2098 			to_ticks = MSEC_TO_TICKS(net->RTO);
2099 		}
2100 		tmr = &stcb->asoc.asconf_timer;
2101 		break;
2102 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2103 		if ((stcb == NULL) || (net != NULL)) {
2104 			return;
2105 		}
2106 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2107 		tmr = &stcb->asoc.delete_prim_timer;
2108 		break;
2109 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2110 		if (stcb == NULL) {
2111 			return;
2112 		}
2113 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2114 			/*
2115 			 * Really an error since stcb is NOT set to
2116 			 * autoclose
2117 			 */
2118 			return;
2119 		}
2120 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2121 		tmr = &stcb->asoc.autoclose_timer;
2122 		break;
2123 	default:
2124 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2125 		    __FUNCTION__, t_type);
2126 		return;
2127 		break;
2128 	}
2129 	if ((to_ticks <= 0) || (tmr == NULL)) {
2130 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2131 		    __FUNCTION__, t_type, to_ticks, (void *)tmr);
2132 		return;
2133 	}
2134 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2135 		/*
2136 		 * we do NOT allow you to have it already running. if it is
2137 		 * we leave the current one up unchanged
2138 		 */
2139 		return;
2140 	}
2141 	/* At this point we can proceed */
2142 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2143 		stcb->asoc.num_send_timers_up++;
2144 	}
2145 	tmr->stopped_from = 0;
2146 	tmr->type = t_type;
2147 	tmr->ep = (void *)inp;
2148 	tmr->tcb = (void *)stcb;
2149 	tmr->net = (void *)net;
2150 	tmr->self = (void *)tmr;
2151 	tmr->vnet = (void *)curvnet;
2152 	tmr->ticks = sctp_get_tick_count();
2153 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2154 	return;
2155 }
2156 
2157 void
2158 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2159     struct sctp_nets *net, uint32_t from)
2160 {
2161 	struct sctp_timer *tmr;
2162 
2163 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2164 	    (inp == NULL))
2165 		return;
2166 
2167 	tmr = NULL;
2168 	if (stcb) {
2169 		SCTP_TCB_LOCK_ASSERT(stcb);
2170 	}
2171 	switch (t_type) {
2172 	case SCTP_TIMER_TYPE_ZERO_COPY:
2173 		tmr = &inp->sctp_ep.zero_copy_timer;
2174 		break;
2175 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2176 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2177 		break;
2178 	case SCTP_TIMER_TYPE_ADDR_WQ:
2179 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2180 		break;
2181 	case SCTP_TIMER_TYPE_SEND:
2182 		if ((stcb == NULL) || (net == NULL)) {
2183 			return;
2184 		}
2185 		tmr = &net->rxt_timer;
2186 		break;
2187 	case SCTP_TIMER_TYPE_INIT:
2188 		if ((stcb == NULL) || (net == NULL)) {
2189 			return;
2190 		}
2191 		tmr = &net->rxt_timer;
2192 		break;
2193 	case SCTP_TIMER_TYPE_RECV:
2194 		if (stcb == NULL) {
2195 			return;
2196 		}
2197 		tmr = &stcb->asoc.dack_timer;
2198 		break;
2199 	case SCTP_TIMER_TYPE_SHUTDOWN:
2200 		if ((stcb == NULL) || (net == NULL)) {
2201 			return;
2202 		}
2203 		tmr = &net->rxt_timer;
2204 		break;
2205 	case SCTP_TIMER_TYPE_HEARTBEAT:
2206 		if ((stcb == NULL) || (net == NULL)) {
2207 			return;
2208 		}
2209 		tmr = &net->hb_timer;
2210 		break;
2211 	case SCTP_TIMER_TYPE_COOKIE:
2212 		if ((stcb == NULL) || (net == NULL)) {
2213 			return;
2214 		}
2215 		tmr = &net->rxt_timer;
2216 		break;
2217 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2218 		/* nothing needed but the endpoint here */
2219 		tmr = &inp->sctp_ep.signature_change;
2220 		/*
2221 		 * We re-use the newcookie timer for the INP kill timer. We
2222 		 * must assure that we do not kill it by accident.
2223 		 */
2224 		break;
2225 	case SCTP_TIMER_TYPE_ASOCKILL:
2226 		/*
2227 		 * Stop the asoc kill timer.
2228 		 */
2229 		if (stcb == NULL) {
2230 			return;
2231 		}
2232 		tmr = &stcb->asoc.strreset_timer;
2233 		break;
2234 
2235 	case SCTP_TIMER_TYPE_INPKILL:
2236 		/*
2237 		 * The inp is setup to die. We re-use the signature_chage
2238 		 * timer since that has stopped and we are in the GONE
2239 		 * state.
2240 		 */
2241 		tmr = &inp->sctp_ep.signature_change;
2242 		break;
2243 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2244 		if ((stcb == NULL) || (net == NULL)) {
2245 			return;
2246 		}
2247 		tmr = &net->pmtu_timer;
2248 		break;
2249 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2250 		if ((stcb == NULL) || (net == NULL)) {
2251 			return;
2252 		}
2253 		tmr = &net->rxt_timer;
2254 		break;
2255 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2256 		if (stcb == NULL) {
2257 			return;
2258 		}
2259 		tmr = &stcb->asoc.shut_guard_timer;
2260 		break;
2261 	case SCTP_TIMER_TYPE_STRRESET:
2262 		if (stcb == NULL) {
2263 			return;
2264 		}
2265 		tmr = &stcb->asoc.strreset_timer;
2266 		break;
2267 	case SCTP_TIMER_TYPE_ASCONF:
2268 		if (stcb == NULL) {
2269 			return;
2270 		}
2271 		tmr = &stcb->asoc.asconf_timer;
2272 		break;
2273 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2274 		if (stcb == NULL) {
2275 			return;
2276 		}
2277 		tmr = &stcb->asoc.delete_prim_timer;
2278 		break;
2279 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2280 		if (stcb == NULL) {
2281 			return;
2282 		}
2283 		tmr = &stcb->asoc.autoclose_timer;
2284 		break;
2285 	default:
2286 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2287 		    __FUNCTION__, t_type);
2288 		break;
2289 	}
2290 	if (tmr == NULL) {
2291 		return;
2292 	}
2293 	if ((tmr->type != t_type) && tmr->type) {
2294 		/*
2295 		 * Ok we have a timer that is under joint use. Cookie timer
2296 		 * per chance with the SEND timer. We therefore are NOT
2297 		 * running the timer that the caller wants stopped.  So just
2298 		 * return.
2299 		 */
2300 		return;
2301 	}
2302 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2303 		stcb->asoc.num_send_timers_up--;
2304 		if (stcb->asoc.num_send_timers_up < 0) {
2305 			stcb->asoc.num_send_timers_up = 0;
2306 		}
2307 	}
2308 	tmr->self = NULL;
2309 	tmr->stopped_from = from;
2310 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2311 	return;
2312 }
2313 
2314 uint32_t
2315 sctp_calculate_len(struct mbuf *m)
2316 {
2317 	uint32_t tlen = 0;
2318 	struct mbuf *at;
2319 
2320 	at = m;
2321 	while (at) {
2322 		tlen += SCTP_BUF_LEN(at);
2323 		at = SCTP_BUF_NEXT(at);
2324 	}
2325 	return (tlen);
2326 }
2327 
2328 void
2329 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2330     struct sctp_association *asoc, uint32_t mtu)
2331 {
2332 	/*
2333 	 * Reset the P-MTU size on this association, this involves changing
2334 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2335 	 * allow the DF flag to be cleared.
2336 	 */
2337 	struct sctp_tmit_chunk *chk;
2338 	unsigned int eff_mtu, ovh;
2339 
2340 	asoc->smallest_mtu = mtu;
2341 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2342 		ovh = SCTP_MIN_OVERHEAD;
2343 	} else {
2344 		ovh = SCTP_MIN_V4_OVERHEAD;
2345 	}
2346 	eff_mtu = mtu - ovh;
2347 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2348 		if (chk->send_size > eff_mtu) {
2349 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2350 		}
2351 	}
2352 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2353 		if (chk->send_size > eff_mtu) {
2354 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2355 		}
2356 	}
2357 }
2358 
2359 
2360 /*
2361  * given an association and starting time of the current RTT period return
2362  * RTO in number of msecs net should point to the current network
2363  */
2364 
/*
 * Compute a fresh RTO (in ms) for 'net' from the elapsed time since 'told',
 * updating the destination's smoothed RTT state (net->rtt, lastsa, lastsv)
 * as a side effect.  'safe' selects whether 'told' must first be copied to
 * an aligned local (sparc64); 'rtt_from_sack' says whether the sample came
 * from DATA/SACK (SCTP_RTT_FROM_DATA) or from non-data like HB/INIT-ACK.
 * Returns 0 on a bad 'safe' argument.
 */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe, int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* now = now - *old, i.e. the elapsed interval */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
	        (uint64_t) now.tv_usec;

	/* compute rtt in ms */
	rtt = (int32_t) (net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* Incremental EWMA update; order of ops matters here. */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	/* Never let the variance collapse to zero. */
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	/* RTO = SRTT + RTTVAR (both kept pre-scaled). */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	/* Latch "satellite network" once a large RTO is observed. */
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2480 
2481 /*
2482  * return a pointer to a contiguous piece of data from the given mbuf chain
2483  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2484  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2485  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2486  */
2487 caddr_t
2488 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2489 {
2490 	uint32_t count;
2491 	uint8_t *ptr;
2492 
2493 	ptr = in_ptr;
2494 	if ((off < 0) || (len <= 0))
2495 		return (NULL);
2496 
2497 	/* find the desired start location */
2498 	while ((m != NULL) && (off > 0)) {
2499 		if (off < SCTP_BUF_LEN(m))
2500 			break;
2501 		off -= SCTP_BUF_LEN(m);
2502 		m = SCTP_BUF_NEXT(m);
2503 	}
2504 	if (m == NULL)
2505 		return (NULL);
2506 
2507 	/* is the current mbuf large enough (eg. contiguous)? */
2508 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2509 		return (mtod(m, caddr_t)+off);
2510 	} else {
2511 		/* else, it spans more than one mbuf, so save a temp copy... */
2512 		while ((m != NULL) && (len > 0)) {
2513 			count = min(SCTP_BUF_LEN(m) - off, len);
2514 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2515 			len -= count;
2516 			ptr += count;
2517 			off = 0;
2518 			m = SCTP_BUF_NEXT(m);
2519 		}
2520 		if ((m == NULL) && (len > 0))
2521 			return (NULL);
2522 		else
2523 			return ((caddr_t)in_ptr);
2524 	}
2525 }
2526 
2527 
2528 
2529 struct sctp_paramhdr *
2530 sctp_get_next_param(struct mbuf *m,
2531     int offset,
2532     struct sctp_paramhdr *pull,
2533     int pull_limit)
2534 {
2535 	/* This just provides a typed signature to Peter's Pull routine */
2536 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2537 	    (uint8_t *) pull));
2538 }
2539 
2540 
2541 struct mbuf *
2542 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2543 {
2544 	struct mbuf *m_last;
2545 	caddr_t dp;
2546 
2547 	if (padlen > 3) {
2548 		return (NULL);
2549 	}
2550 	if (padlen <= M_TRAILINGSPACE(m)) {
2551 		/*
2552 		 * The easy way. We hope the majority of the time we hit
2553 		 * here :)
2554 		 */
2555 		m_last = m;
2556 	} else {
2557 		/* Hard way we must grow the mbuf chain */
2558 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2559 		if (m_last == NULL) {
2560 			return (NULL);
2561 		}
2562 		SCTP_BUF_LEN(m_last) = 0;
2563 		SCTP_BUF_NEXT(m_last) = NULL;
2564 		SCTP_BUF_NEXT(m) = m_last;
2565 	}
2566 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2567 	SCTP_BUF_LEN(m_last) += padlen;
2568 	memset(dp, 0, padlen);
2569 	return (m_last);
2570 }
2571 
2572 struct mbuf *
2573 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2574 {
2575 	/* find the last mbuf in chain and pad it */
2576 	struct mbuf *m_at;
2577 
2578 	if (last_mbuf != NULL) {
2579 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2580 	} else {
2581 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2582 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2583 				return (sctp_add_pad_tombuf(m_at, padval));
2584 			}
2585 		}
2586 	}
2587 	return (NULL);
2588 }
2589 
/*
 * Queue an SCTP_ASSOC_CHANGE notification for 'state' on the socket's read
 * queue (if SCTP_PCB_FLAGS_RECVASSOCEVNT is enabled), appending the list of
 * supported features for COMM_UP/RESTART or the peer's ABORT chunk for
 * COMM_LOST/CANT_STR_ASSOC.  For 1-to-1 style sockets a failure state also
 * sets so_error (so blocked callers fail) and wakes any sleepers.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	size_t notif_len, abort_len;
	unsigned int i;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			/* Full chunk length (header included) as sent by the peer. */
			abort_len = ntohs(abort->ch.chunk_length);
		} else {
			abort_len = 0;
		}
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			/* Reserve room for the supported-features byte list. */
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		/*
		 * Only the full-size allocation carries the optional
		 * payload; the retry path above skips this block.
		 */
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				/* Echo the peer's ABORT chunk back to the app. */
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			/* not that we need this */
			control->tail_mbuf = m_notify;
			control->spec_flags = M_NOTIFICATION;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up and error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				/* Peer rejected us before the handshake completed. */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				/* Local abort during setup: report as a timeout. */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/*
		 * Hold a ref across the TCB unlock/relock so the assoc
		 * cannot go away while we take the socket lock.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore_locked(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2737 
2738 static void
2739 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2740     struct sockaddr *sa, uint32_t error)
2741 {
2742 	struct mbuf *m_notify;
2743 	struct sctp_paddr_change *spc;
2744 	struct sctp_queued_to_read *control;
2745 
2746 	if ((stcb == NULL) ||
2747 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2748 		/* event not enabled */
2749 		return;
2750 	}
2751 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2752 	if (m_notify == NULL)
2753 		return;
2754 	SCTP_BUF_LEN(m_notify) = 0;
2755 	spc = mtod(m_notify, struct sctp_paddr_change *);
2756 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2757 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2758 	spc->spc_flags = 0;
2759 	spc->spc_length = sizeof(struct sctp_paddr_change);
2760 	switch (sa->sa_family) {
2761 #ifdef INET
2762 	case AF_INET:
2763 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2764 		break;
2765 #endif
2766 #ifdef INET6
2767 	case AF_INET6:
2768 		{
2769 			struct sockaddr_in6 *sin6;
2770 
2771 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2772 
2773 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2774 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2775 				if (sin6->sin6_scope_id == 0) {
2776 					/* recover scope_id for user */
2777 					(void)sa6_recoverscope(sin6);
2778 				} else {
2779 					/* clear embedded scope_id for user */
2780 					in6_clearscope(&sin6->sin6_addr);
2781 				}
2782 			}
2783 			break;
2784 		}
2785 #endif
2786 	default:
2787 		/* TSNH */
2788 		break;
2789 	}
2790 	spc->spc_state = state;
2791 	spc->spc_error = error;
2792 	spc->spc_assoc_id = sctp_get_associd(stcb);
2793 
2794 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2795 	SCTP_BUF_NEXT(m_notify) = NULL;
2796 
2797 	/* append to socket */
2798 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2799 	    0, 0, stcb->asoc.context, 0, 0, 0,
2800 	    m_notify);
2801 	if (control == NULL) {
2802 		/* no memory */
2803 		sctp_m_freem(m_notify);
2804 		return;
2805 	}
2806 	control->length = SCTP_BUF_LEN(m_notify);
2807 	control->spec_flags = M_NOTIFICATION;
2808 	/* not that we need this */
2809 	control->tail_mbuf = m_notify;
2810 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2811 	    control,
2812 	    &stcb->sctp_socket->so_rcv, 1,
2813 	    SCTP_READ_LOCK_NOT_HELD,
2814 	    SCTP_SO_NOT_LOCKED);
2815 }
2816 
2817 
/*
 * Queue an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification for the
 * chunk 'chk' whose delivery failed ('sent' selects SCTP_DATA_SENT vs
 * SCTP_DATA_UNSENT), handing the chunk's user data back to the application.
 * Which of the two notification formats is used depends on whether the
 * newer RECVNSENDFAILEVNT feature is enabled.  Steals chk->data.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		/*
		 * Reported length = notification header + user payload
		 * (chunk size minus the SCTP DATA chunk header).
		 * NOTE(review): this subtracts unconditionally, while the
		 * trim below only happens when send_size >= the header
		 * size — looks inconsistent for tiny chunks; confirm.
		 */
		length += chk->send_size;
		length -= sizeof(struct sctp_data_chunk);
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		/* Same length accounting as the new-format branch above. */
		length += chk->send_size;
		length -= sizeof(struct sctp_data_chunk);
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	/* Hand the user data back by chaining it behind the notification. */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2931 
2932 
/*
 * Queue an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification for a
 * message that was never handed to the lower layer (still sitting on the
 * stream output queue as a sctp_stream_queue_pending).  Which of the two
 * notification formats is built depends on which feature the application
 * enabled.  The pending user data in sp->data is stolen and chained after
 * the notification header so the application gets its message back.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	/* Bail if neither the old nor the new send-failed event is enabled. */
	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* Size the notification header for whichever format is in use. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* New-style sctp_send_failed_event notification. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		/* Advertised length covers the header plus the user data. */
		length += sp->length;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->stream;
		if (sp->some_taken) {
			/* Part of the message already went out as chunks. */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		/* Old-style (deprecated) sctp_send_failed notification. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		/* Advertised length covers the header plus the user data. */
		length += sp->length;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	/* Hand the user's data back by chaining it after the header. */
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3033 
3034 
3035 
3036 static void
3037 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3038 {
3039 	struct mbuf *m_notify;
3040 	struct sctp_adaptation_event *sai;
3041 	struct sctp_queued_to_read *control;
3042 
3043 	if ((stcb == NULL) ||
3044 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3045 		/* event not enabled */
3046 		return;
3047 	}
3048 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3049 	if (m_notify == NULL)
3050 		/* no space left */
3051 		return;
3052 	SCTP_BUF_LEN(m_notify) = 0;
3053 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3054 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3055 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3056 	sai->sai_flags = 0;
3057 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3058 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3059 	sai->sai_assoc_id = sctp_get_associd(stcb);
3060 
3061 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3062 	SCTP_BUF_NEXT(m_notify) = NULL;
3063 
3064 	/* append to socket */
3065 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3066 	    0, 0, stcb->asoc.context, 0, 0, 0,
3067 	    m_notify);
3068 	if (control == NULL) {
3069 		/* no memory */
3070 		sctp_m_freem(m_notify);
3071 		return;
3072 	}
3073 	control->length = SCTP_BUF_LEN(m_notify);
3074 	control->spec_flags = M_NOTIFICATION;
3075 	/* not that we need this */
3076 	control->tail_mbuf = m_notify;
3077 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3078 	    control,
3079 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3080 }
3081 
/* This always must be called with the read-queue LOCKED in the INP */
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT notification.  Unlike the other
 * notification helpers this one does the socket-buffer accounting and
 * read-queue insertion by hand, because the entry must be placed directly
 * after the partially delivered message (asoc.control_pdapi) rather than
 * at the tail of the read queue.  `val` packs the stream id in the upper
 * 16 bits and the sequence number in the lower 16.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	/* No point queueing a notification nobody can read. */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* val encodes stream (high 16 bits) and sequence (low 16 bits). */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	/* length is reset here and re-added via the atomic_add below. */
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* Manual socket-buffer accounting (no sctp_add_to_readq here). */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	/* Insert right after the partially delivered message, if tracked. */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Drop the TCB lock to take the socket lock in the
			 * right order; hold a refcount so the stcb cannot
			 * be freed while unlocked.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3177 
/*
 * Handle a completed SHUTDOWN from the peer: for one-to-one style (and
 * TCP-pool) sockets, mark the socket as unable to send so the application
 * sees EOF-like semantics, then (if enabled) queue an SCTP_SHUTDOWN_EVENT
 * notification on the read queue.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Take the socket lock in the proper order, holding a
		 * refcount so the stcb survives the unlocked window.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3246 
3247 static void
3248 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3249     int so_locked
3250 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3251     SCTP_UNUSED
3252 #endif
3253 )
3254 {
3255 	struct mbuf *m_notify;
3256 	struct sctp_sender_dry_event *event;
3257 	struct sctp_queued_to_read *control;
3258 
3259 	if ((stcb == NULL) ||
3260 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3261 		/* event not enabled */
3262 		return;
3263 	}
3264 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3265 	if (m_notify == NULL) {
3266 		/* no space left */
3267 		return;
3268 	}
3269 	SCTP_BUF_LEN(m_notify) = 0;
3270 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3271 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3272 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3273 	event->sender_dry_flags = 0;
3274 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3275 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3276 
3277 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3278 	SCTP_BUF_NEXT(m_notify) = NULL;
3279 
3280 	/* append to socket */
3281 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3282 	    0, 0, stcb->asoc.context, 0, 0, 0,
3283 	    m_notify);
3284 	if (control == NULL) {
3285 		/* no memory */
3286 		sctp_m_freem(m_notify);
3287 		return;
3288 	}
3289 	control->length = SCTP_BUF_LEN(m_notify);
3290 	control->spec_flags = M_NOTIFICATION;
3291 	/* not that we need this */
3292 	control->tail_mbuf = m_notify;
3293 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3294 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3295 }
3296 
3297 
3298 void
3299 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3300 {
3301 	struct mbuf *m_notify;
3302 	struct sctp_queued_to_read *control;
3303 	struct sctp_stream_change_event *stradd;
3304 
3305 	if ((stcb == NULL) ||
3306 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3307 		/* event not enabled */
3308 		return;
3309 	}
3310 	if ((stcb->asoc.peer_req_out) && flag) {
3311 		/* Peer made the request, don't tell the local user */
3312 		stcb->asoc.peer_req_out = 0;
3313 		return;
3314 	}
3315 	stcb->asoc.peer_req_out = 0;
3316 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3317 	if (m_notify == NULL)
3318 		/* no space left */
3319 		return;
3320 	SCTP_BUF_LEN(m_notify) = 0;
3321 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3322 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3323 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3324 	stradd->strchange_flags = flag;
3325 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3326 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3327 	stradd->strchange_instrms = numberin;
3328 	stradd->strchange_outstrms = numberout;
3329 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3330 	SCTP_BUF_NEXT(m_notify) = NULL;
3331 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3332 		/* no space */
3333 		sctp_m_freem(m_notify);
3334 		return;
3335 	}
3336 	/* append to socket */
3337 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3338 	    0, 0, stcb->asoc.context, 0, 0, 0,
3339 	    m_notify);
3340 	if (control == NULL) {
3341 		/* no memory */
3342 		sctp_m_freem(m_notify);
3343 		return;
3344 	}
3345 	control->spec_flags = M_NOTIFICATION;
3346 	control->length = SCTP_BUF_LEN(m_notify);
3347 	/* not that we need this */
3348 	control->tail_mbuf = m_notify;
3349 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3350 	    control,
3351 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3352 }
3353 
3354 void
3355 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3356 {
3357 	struct mbuf *m_notify;
3358 	struct sctp_queued_to_read *control;
3359 	struct sctp_assoc_reset_event *strasoc;
3360 
3361 	if ((stcb == NULL) ||
3362 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3363 		/* event not enabled */
3364 		return;
3365 	}
3366 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3367 	if (m_notify == NULL)
3368 		/* no space left */
3369 		return;
3370 	SCTP_BUF_LEN(m_notify) = 0;
3371 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3372 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3373 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3374 	strasoc->assocreset_flags = flag;
3375 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3376 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3377 	strasoc->assocreset_local_tsn = sending_tsn;
3378 	strasoc->assocreset_remote_tsn = recv_tsn;
3379 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3380 	SCTP_BUF_NEXT(m_notify) = NULL;
3381 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3382 		/* no space */
3383 		sctp_m_freem(m_notify);
3384 		return;
3385 	}
3386 	/* append to socket */
3387 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3388 	    0, 0, stcb->asoc.context, 0, 0, 0,
3389 	    m_notify);
3390 	if (control == NULL) {
3391 		/* no memory */
3392 		sctp_m_freem(m_notify);
3393 		return;
3394 	}
3395 	control->spec_flags = M_NOTIFICATION;
3396 	control->length = SCTP_BUF_LEN(m_notify);
3397 	/* not that we need this */
3398 	control->tail_mbuf = m_notify;
3399 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3400 	    control,
3401 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3402 }
3403 
3404 
3405 
3406 static void
3407 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3408     int number_entries, uint16_t * list, int flag)
3409 {
3410 	struct mbuf *m_notify;
3411 	struct sctp_queued_to_read *control;
3412 	struct sctp_stream_reset_event *strreset;
3413 	int len;
3414 
3415 	if ((stcb == NULL) ||
3416 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3417 		/* event not enabled */
3418 		return;
3419 	}
3420 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3421 	if (m_notify == NULL)
3422 		/* no space left */
3423 		return;
3424 	SCTP_BUF_LEN(m_notify) = 0;
3425 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3426 	if (len > M_TRAILINGSPACE(m_notify)) {
3427 		/* never enough room */
3428 		sctp_m_freem(m_notify);
3429 		return;
3430 	}
3431 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3432 	memset(strreset, 0, len);
3433 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3434 	strreset->strreset_flags = flag;
3435 	strreset->strreset_length = len;
3436 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3437 	if (number_entries) {
3438 		int i;
3439 
3440 		for (i = 0; i < number_entries; i++) {
3441 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3442 		}
3443 	}
3444 	SCTP_BUF_LEN(m_notify) = len;
3445 	SCTP_BUF_NEXT(m_notify) = NULL;
3446 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3447 		/* no space */
3448 		sctp_m_freem(m_notify);
3449 		return;
3450 	}
3451 	/* append to socket */
3452 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3453 	    0, 0, stcb->asoc.context, 0, 0, 0,
3454 	    m_notify);
3455 	if (control == NULL) {
3456 		/* no memory */
3457 		sctp_m_freem(m_notify);
3458 		return;
3459 	}
3460 	control->spec_flags = M_NOTIFICATION;
3461 	control->length = SCTP_BUF_LEN(m_notify);
3462 	/* not that we need this */
3463 	control->tail_mbuf = m_notify;
3464 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3465 	    control,
3466 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3467 }
3468 
3469 
/*
 * Queue an SCTP_REMOTE_ERROR notification for an ERROR chunk received from
 * the peer.  The raw chunk (if any) is appended after the notification
 * header.  If a big enough mbuf cannot be had, fall back to a header-only
 * notification without the chunk payload.
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	size_t notif_len, chunk_len;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		return;
	}
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
	} else {
		chunk_len = 0;
	}
	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		/* Header only; the chunk payload is dropped in this case. */
		notif_len = sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	memset(sre, 0, notif_len);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	/*
	 * Only copy the chunk when the full-size allocation succeeded; on
	 * the fallback path notif_len equals the bare header size, so this
	 * test is false and no chunk data is appended.
	 */
	if (notif_len > sizeof(struct sctp_remote_error)) {
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		/* not that we need this */
		control->tail_mbuf = m_notify;
		control->spec_flags = M_NOTIFICATION;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		/* no memory */
		sctp_m_freem(m_notify);
	}
}
3526 
3527 
3528 void
3529 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3530     uint32_t error, void *data, int so_locked
3531 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3532     SCTP_UNUSED
3533 #endif
3534 )
3535 {
3536 	if ((stcb == NULL) ||
3537 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3538 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3539 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3540 		/* If the socket is gone we are out of here */
3541 		return;
3542 	}
3543 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3544 		return;
3545 	}
3546 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3547 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3548 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3549 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3550 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3551 			/* Don't report these in front states */
3552 			return;
3553 		}
3554 	}
3555 	switch (notification) {
3556 	case SCTP_NOTIFY_ASSOC_UP:
3557 		if (stcb->asoc.assoc_up_sent == 0) {
3558 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3559 			stcb->asoc.assoc_up_sent = 1;
3560 		}
3561 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3562 			sctp_notify_adaptation_layer(stcb);
3563 		}
3564 		if (stcb->asoc.auth_supported == 0) {
3565 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3566 			    NULL, so_locked);
3567 		}
3568 		break;
3569 	case SCTP_NOTIFY_ASSOC_DOWN:
3570 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3571 		break;
3572 	case SCTP_NOTIFY_INTERFACE_DOWN:
3573 		{
3574 			struct sctp_nets *net;
3575 
3576 			net = (struct sctp_nets *)data;
3577 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3578 			    (struct sockaddr *)&net->ro._l_addr, error);
3579 			break;
3580 		}
3581 	case SCTP_NOTIFY_INTERFACE_UP:
3582 		{
3583 			struct sctp_nets *net;
3584 
3585 			net = (struct sctp_nets *)data;
3586 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3587 			    (struct sockaddr *)&net->ro._l_addr, error);
3588 			break;
3589 		}
3590 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3591 		{
3592 			struct sctp_nets *net;
3593 
3594 			net = (struct sctp_nets *)data;
3595 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3596 			    (struct sockaddr *)&net->ro._l_addr, error);
3597 			break;
3598 		}
3599 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3600 		sctp_notify_send_failed2(stcb, error,
3601 		    (struct sctp_stream_queue_pending *)data, so_locked);
3602 		break;
3603 	case SCTP_NOTIFY_SENT_DG_FAIL:
3604 		sctp_notify_send_failed(stcb, 1, error,
3605 		    (struct sctp_tmit_chunk *)data, so_locked);
3606 		break;
3607 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3608 		sctp_notify_send_failed(stcb, 0, error,
3609 		    (struct sctp_tmit_chunk *)data, so_locked);
3610 		break;
3611 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3612 		{
3613 			uint32_t val;
3614 
3615 			val = *((uint32_t *) data);
3616 
3617 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3618 			break;
3619 		}
3620 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3621 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3622 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3623 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3624 		} else {
3625 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3626 		}
3627 		break;
3628 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3629 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3630 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3631 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3632 		} else {
3633 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3634 		}
3635 		break;
3636 	case SCTP_NOTIFY_ASSOC_RESTART:
3637 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3638 		if (stcb->asoc.auth_supported == 0) {
3639 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3640 			    NULL, so_locked);
3641 		}
3642 		break;
3643 	case SCTP_NOTIFY_STR_RESET_SEND:
3644 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3645 		break;
3646 	case SCTP_NOTIFY_STR_RESET_RECV:
3647 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3648 		break;
3649 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3650 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3651 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3652 		break;
3653 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3654 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3655 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3656 		break;
3657 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3658 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3659 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3660 		break;
3661 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3662 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3663 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3664 		break;
3665 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3666 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3667 		    error);
3668 		break;
3669 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3670 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3671 		    error);
3672 		break;
3673 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3674 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3675 		    error);
3676 		break;
3677 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3678 		sctp_notify_shutdown_event(stcb);
3679 		break;
3680 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3681 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3682 		    (uint16_t) (uintptr_t) data,
3683 		    so_locked);
3684 		break;
3685 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3686 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3687 		    (uint16_t) (uintptr_t) data,
3688 		    so_locked);
3689 		break;
3690 	case SCTP_NOTIFY_NO_PEER_AUTH:
3691 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3692 		    (uint16_t) (uintptr_t) data,
3693 		    so_locked);
3694 		break;
3695 	case SCTP_NOTIFY_SENDER_DRY:
3696 		sctp_notify_sender_dry_event(stcb, so_locked);
3697 		break;
3698 	case SCTP_NOTIFY_REMOTE_ERROR:
3699 		sctp_notify_remote_error(stcb, error, data);
3700 		break;
3701 	default:
3702 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3703 		    __FUNCTION__, notification, notification);
3704 		break;
3705 	}			/* end switch */
3706 }
3707 
/*
 * Flush every outbound queue (sent queue, send queue, and per-stream
 * output queues) of an association that is being torn down, notifying the
 * application about each datagram that will never be (re)sent.  `error`
 * is passed through to the send-failed notifications; `holds_lock`
 * indicates the caller already owns the TCB send lock.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		/* NR-acked chunks were already taken off the stream count. */
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* The notify may steal chk->data; free it if not. */
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* The notify may steal chk->data; free it if not. */
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				/* The notify may steal sp->data; free if not. */
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3819 
3820 void
3821 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3822     struct sctp_abort_chunk *abort, int so_locked
3823 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3824     SCTP_UNUSED
3825 #endif
3826 )
3827 {
3828 	if (stcb == NULL) {
3829 		return;
3830 	}
3831 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3832 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3833 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3834 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3835 	}
3836 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3837 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3838 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3839 		return;
3840 	}
3841 	/* Tell them we lost the asoc */
3842 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3843 	if (from_peer) {
3844 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3845 	} else {
3846 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3847 	}
3848 }
3849 
/*
 * Abort an association for which a TCB may or may not exist.  Sends an
 * ABORT packet back toward the source of the offending packet (with
 * op_err as the cause, if supplied), notifies the ULP when a TCB is
 * present, and frees the TCB.  When stcb != NULL the caller holds the
 * TCB lock (the Apple path below unlocks/relocks it), and the TCB is
 * gone on return.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/* vtag 0 is used when no association state is known. */
	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* ABORT carries the peer's verification tag (or 0 if unknown). */
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    use_mflowid, mflowid,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock ordering: the socket lock must be taken before the
		 * TCB lock, so drop the TCB lock first while holding a
		 * refcount to keep the TCB from disappearing.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* Established associations also drop the "established" gauge. */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3898 
3899 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the circular in/out TSN logs kept in the
 * association (only compiled when SCTP_ASOCLOG_OF_TSNS is defined).
 *
 * NOTE(review): the inner guard is spelled "NOSIY_PRINTS" (apparent
 * typo for "NOISY_PRINTS"), so the body below is dead code unless that
 * exact misspelled macro is defined — confirm whether this is intended.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/*
	 * The log is circular: when wrapped, print the older half first
	 * (from the current index to the end of the buffer)...
	 */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	/* ...then the newer half from the start up to the current index. */
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* Same circular-buffer traversal for the outbound log. */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3960 
3961 #endif
3962 
/*
 * Abort an existing association: notify the ULP (unless the socket is
 * gone), send an ABORT chunk to the peer using the TCB's state, update
 * statistics, and free the TCB.  If stcb is NULL, only a pending
 * inp free (deferred because asocs still existed) is completed.
 * The TCB is gone on return when stcb != NULL.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			/* Finish a deferred endpoint free if nothing is left. */
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* Established associations also drop the "established" gauge. */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Socket lock must be taken before the TCB lock; drop and re-take
	 * the TCB lock under a refcount so the TCB cannot be freed
	 * underneath us.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4023 
/*
 * Handle an "out of the blue" packet: one for which no association
 * exists (RFC 4960, section 8.4).  Walks the chunks in the packet to
 * decide the response: some chunk types get no reply, SHUTDOWN-ACK gets
 * a SHUTDOWN-COMPLETE, and everything else gets an ABORT (subject to
 * the sctp_blackhole sysctl).
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* Finish a deferred endpoint free if nothing is left. */
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			/* Remembered for the blackhole decision below. */
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    use_mflowid, mflowid,
			    vrf_id, port);
			return;
		default:
			break;
		}
		/* Chunks are padded to 4-byte boundaries on the wire. */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/*
	 * sctp_blackhole sysctl: 0 = always ABORT, 1 = stay silent only
	 * for packets containing an INIT, 2+ = never ABORT.
	 */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
		    use_mflowid, mflowid,
		    vrf_id, port);
	}
}
4089 
4090 /*
4091  * check the inbound datagram to make sure there is not an abort inside it,
4092  * if there is return 1, else return 0.
4093  */
4094 int
4095 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4096 {
4097 	struct sctp_chunkhdr *ch;
4098 	struct sctp_init_chunk *init_chk, chunk_buf;
4099 	int offset;
4100 	unsigned int chk_length;
4101 
4102 	offset = iphlen + sizeof(struct sctphdr);
4103 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4104 	    (uint8_t *) & chunk_buf);
4105 	while (ch != NULL) {
4106 		chk_length = ntohs(ch->chunk_length);
4107 		if (chk_length < sizeof(*ch)) {
4108 			/* packet is probably corrupt */
4109 			break;
4110 		}
4111 		/* we seem to be ok, is it an abort? */
4112 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4113 			/* yep, tell them */
4114 			return (1);
4115 		}
4116 		if (ch->chunk_type == SCTP_INITIATION) {
4117 			/* need to update the Vtag */
4118 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4119 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4120 			if (init_chk != NULL) {
4121 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4122 			}
4123 		}
4124 		/* Nope, move to the next chunk */
4125 		offset += SCTP_SIZE32(chk_length);
4126 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4127 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4128 	}
4129 	return (0);
4130 }
4131 
4132 /*
4133  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4134  * set (i.e. it's 0) so, create this function to compare link local scopes
4135  */
4136 #ifdef INET6
4137 uint32_t
4138 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4139 {
4140 	struct sockaddr_in6 a, b;
4141 
4142 	/* save copies */
4143 	a = *addr1;
4144 	b = *addr2;
4145 
4146 	if (a.sin6_scope_id == 0)
4147 		if (sa6_recoverscope(&a)) {
4148 			/* can't get scope, so can't match */
4149 			return (0);
4150 		}
4151 	if (b.sin6_scope_id == 0)
4152 		if (sa6_recoverscope(&b)) {
4153 			/* can't get scope, so can't match */
4154 			return (0);
4155 		}
4156 	if (a.sin6_scope_id != b.sin6_scope_id)
4157 		return (0);
4158 
4159 	return (1);
4160 }
4161 
4162 /*
4163  * returns a sockaddr_in6 with embedded scope recovered and removed
4164  */
/*
 * Return a link-local IPv6 address with its scope id recovered from
 * the embedded form.  If the scope must be recovered, the copy in
 * *store is used and returned; otherwise the original pointer is
 * returned.  NOTE(review): when a scope id is already set, the
 * embedded scope is cleared *in place* in the caller's addr — callers
 * must not rely on addr being left untouched.
 */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				/* work on a copy; recover the kernel scope id */
				*store = *addr;
				if (!sa6_recoverscope(store)) {
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/* else, return the original "to" addr */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}
4185 
4186 #endif
4187 
4188 /*
4189  * are the two addresses the same?  currently a "scopeless" check returns: 1
4190  * if same, 0 if not
4191  */
4192 int
4193 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4194 {
4195 
4196 	/* must be valid */
4197 	if (sa1 == NULL || sa2 == NULL)
4198 		return (0);
4199 
4200 	/* must be the same family */
4201 	if (sa1->sa_family != sa2->sa_family)
4202 		return (0);
4203 
4204 	switch (sa1->sa_family) {
4205 #ifdef INET6
4206 	case AF_INET6:
4207 		{
4208 			/* IPv6 addresses */
4209 			struct sockaddr_in6 *sin6_1, *sin6_2;
4210 
4211 			sin6_1 = (struct sockaddr_in6 *)sa1;
4212 			sin6_2 = (struct sockaddr_in6 *)sa2;
4213 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4214 			    sin6_2));
4215 		}
4216 #endif
4217 #ifdef INET
4218 	case AF_INET:
4219 		{
4220 			/* IPv4 addresses */
4221 			struct sockaddr_in *sin_1, *sin_2;
4222 
4223 			sin_1 = (struct sockaddr_in *)sa1;
4224 			sin_2 = (struct sockaddr_in *)sa2;
4225 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4226 		}
4227 #endif
4228 	default:
4229 		/* we don't do these... */
4230 		return (0);
4231 	}
4232 }
4233 
4234 void
4235 sctp_print_address(struct sockaddr *sa)
4236 {
4237 #ifdef INET6
4238 	char ip6buf[INET6_ADDRSTRLEN];
4239 
4240 #endif
4241 
4242 	switch (sa->sa_family) {
4243 #ifdef INET6
4244 	case AF_INET6:
4245 		{
4246 			struct sockaddr_in6 *sin6;
4247 
4248 			sin6 = (struct sockaddr_in6 *)sa;
4249 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4250 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4251 			    ntohs(sin6->sin6_port),
4252 			    sin6->sin6_scope_id);
4253 			break;
4254 		}
4255 #endif
4256 #ifdef INET
4257 	case AF_INET:
4258 		{
4259 			struct sockaddr_in *sin;
4260 			unsigned char *p;
4261 
4262 			sin = (struct sockaddr_in *)sa;
4263 			p = (unsigned char *)&sin->sin_addr;
4264 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4265 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4266 			break;
4267 		}
4268 #endif
4269 	default:
4270 		SCTP_PRINTF("?\n");
4271 		break;
4272 	}
4273 }
4274 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 * Used by peeloff/accept: data queued for the association must
	 * follow it to the new socket.  Socket-buffer accounting is
	 * removed from the old so_rcv and re-added to the new one.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;	/* staging list between the two inps */
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* serialize against readers of the old socket buffer */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* subtract each mbuf from the old socket's accounting */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* add each mbuf into the new socket's accounting */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4350 
/*
 * Queue a completed (or partial, if end == 0) message onto the
 * endpoint's read queue and charge its mbufs to the socket receive
 * buffer so select()/poll() wake up.  Empty mbufs are stripped from
 * the chain first.  If the socket can no longer be read from, the
 * control and its data are freed instead of queued.  Wakes up the
 * socket (or fires the zero-copy event) once the control is queued.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* reader is gone: discard the control and its data */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* notifications don't count as received user messages */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/*
	 * Walk the chain: drop zero-length mbufs, charge the rest to the
	 * socket buffer and accumulate control->length.
	 */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		/* mark the message complete for the reader */
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Take the socket lock before the TCB
				 * lock, holding a refcount on the TCB.
				 */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4476 
4477 
/*
 * Append mbuf chain m to an existing read-queue control (partial
 * delivery or reassembly append).  Zero-length mbufs are stripped,
 * the remaining ones are charged to sb (when non-NULL) and added to
 * the control's length.  Returns 0 on success, -1 when the control is
 * missing/complete or nothing is left to append.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* reader is gone: silently accept and drop */
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* strip empty mbufs, charge the rest to the socket buffer */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * Socket lock before TCB lock; hold a refcount on
			 * the TCB across the drop/retake.
			 */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4627 
4628 
4629 
4630 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4631  *************ALTERNATE ROUTING CODE
4632  */
4633 
4634 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4635  *************ALTERNATE ROUTING CODE
4636  */
4637 
4638 struct mbuf *
4639 sctp_generate_cause(uint16_t code, char *info)
4640 {
4641 	struct mbuf *m;
4642 	struct sctp_gen_error_cause *cause;
4643 	size_t info_len, len;
4644 
4645 	if ((code == 0) || (info == NULL)) {
4646 		return (NULL);
4647 	}
4648 	info_len = strlen(info);
4649 	len = sizeof(struct sctp_paramhdr) + info_len;
4650 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4651 	if (m != NULL) {
4652 		SCTP_BUF_LEN(m) = len;
4653 		cause = mtod(m, struct sctp_gen_error_cause *);
4654 		cause->code = htons(code);
4655 		cause->length = htons((uint16_t) len);
4656 		memcpy(cause->info, info, info_len);
4657 	}
4658 	return (m);
4659 }
4660 
4661 struct mbuf *
4662 sctp_generate_no_user_data_cause(uint32_t tsn)
4663 {
4664 	struct mbuf *m;
4665 	struct sctp_error_no_user_data *no_user_data_cause;
4666 	size_t len;
4667 
4668 	len = sizeof(struct sctp_error_no_user_data);
4669 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4670 	if (m != NULL) {
4671 		SCTP_BUF_LEN(m) = len;
4672 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4673 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4674 		no_user_data_cause->cause.length = htons((uint16_t) len);
4675 		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4676 	}
4677 	return (m);
4678 }
4679 
4680 #ifdef SCTP_MBCNT_LOGGING
/*
 * Release the buffer-space accounting held by chunk tp1: decrement the
 * association's outbound queue counters and, for 1-to-1 style sockets,
 * the socket send-buffer byte count.  Clamps to zero rather than
 * underflowing.  Only compiled when SCTP_MBCNT_LOGGING is defined.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		/* nothing was charged for this chunk */
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* guard against underflow of the queue-size accounting */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* only 1-to-1 style sockets track bytes in so_snd */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4712 
4713 #endif
4714 
4715 int
4716 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4717     uint8_t sent, int so_locked
4718 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4719     SCTP_UNUSED
4720 #endif
4721 )
4722 {
4723 	struct sctp_stream_out *strq;
4724 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4725 	struct sctp_stream_queue_pending *sp;
4726 	uint16_t stream = 0, seq = 0;
4727 	uint8_t foundeom = 0;
4728 	int ret_sz = 0;
4729 	int notdone;
4730 	int do_wakeup_routine = 0;
4731 
4732 	stream = tp1->rec.data.stream_number;
4733 	seq = tp1->rec.data.stream_seq;
4734 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4735 		stcb->asoc.abandoned_sent[0]++;
4736 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4737 		stcb->asoc.strmout[stream].abandoned_sent[0]++;
4738 #if defined(SCTP_DETAILED_STR_STATS)
4739 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4740 #endif
4741 	} else {
4742 		stcb->asoc.abandoned_unsent[0]++;
4743 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4744 		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
4745 #if defined(SCTP_DETAILED_STR_STATS)
4746 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4747 #endif
4748 	}
4749 	do {
4750 		ret_sz += tp1->book_size;
4751 		if (tp1->data != NULL) {
4752 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4753 				sctp_flight_size_decrease(tp1);
4754 				sctp_total_flight_decrease(stcb, tp1);
4755 			}
4756 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4757 			stcb->asoc.peers_rwnd += tp1->send_size;
4758 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4759 			if (sent) {
4760 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4761 			} else {
4762 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4763 			}
4764 			if (tp1->data) {
4765 				sctp_m_freem(tp1->data);
4766 				tp1->data = NULL;
4767 			}
4768 			do_wakeup_routine = 1;
4769 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4770 				stcb->asoc.sent_queue_cnt_removeable--;
4771 			}
4772 		}
4773 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4774 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4775 		    SCTP_DATA_NOT_FRAG) {
4776 			/* not frag'ed we ae done   */
4777 			notdone = 0;
4778 			foundeom = 1;
4779 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4780 			/* end of frag, we are done */
4781 			notdone = 0;
4782 			foundeom = 1;
4783 		} else {
4784 			/*
4785 			 * Its a begin or middle piece, we must mark all of
4786 			 * it
4787 			 */
4788 			notdone = 1;
4789 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4790 		}
4791 	} while (tp1 && notdone);
4792 	if (foundeom == 0) {
4793 		/*
4794 		 * The multi-part message was scattered across the send and
4795 		 * sent queue.
4796 		 */
4797 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4798 			if ((tp1->rec.data.stream_number != stream) ||
4799 			    (tp1->rec.data.stream_seq != seq)) {
4800 				break;
4801 			}
4802 			/*
4803 			 * save to chk in case we have some on stream out
4804 			 * queue. If so and we have an un-transmitted one we
4805 			 * don't have to fudge the TSN.
4806 			 */
4807 			chk = tp1;
4808 			ret_sz += tp1->book_size;
4809 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4810 			if (sent) {
4811 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4812 			} else {
4813 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4814 			}
4815 			if (tp1->data) {
4816 				sctp_m_freem(tp1->data);
4817 				tp1->data = NULL;
4818 			}
4819 			/* No flight involved here book the size to 0 */
4820 			tp1->book_size = 0;
4821 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4822 				foundeom = 1;
4823 			}
4824 			do_wakeup_routine = 1;
4825 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4826 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4827 			/*
4828 			 * on to the sent queue so we can wait for it to be
4829 			 * passed by.
4830 			 */
4831 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4832 			    sctp_next);
4833 			stcb->asoc.send_queue_cnt--;
4834 			stcb->asoc.sent_queue_cnt++;
4835 		}
4836 	}
4837 	if (foundeom == 0) {
4838 		/*
4839 		 * Still no eom found. That means there is stuff left on the
4840 		 * stream out queue.. yuck.
4841 		 */
4842 		SCTP_TCB_SEND_LOCK(stcb);
4843 		strq = &stcb->asoc.strmout[stream];
4844 		sp = TAILQ_FIRST(&strq->outqueue);
4845 		if (sp != NULL) {
4846 			sp->discard_rest = 1;
4847 			/*
4848 			 * We may need to put a chunk on the queue that
4849 			 * holds the TSN that would have been sent with the
4850 			 * LAST bit.
4851 			 */
4852 			if (chk == NULL) {
4853 				/* Yep, we have to */
4854 				sctp_alloc_a_chunk(stcb, chk);
4855 				if (chk == NULL) {
4856 					/*
4857 					 * we are hosed. All we can do is
4858 					 * nothing.. which will cause an
4859 					 * abort if the peer is paying
4860 					 * attention.
4861 					 */
4862 					goto oh_well;
4863 				}
4864 				memset(chk, 0, sizeof(*chk));
4865 				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4866 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4867 				chk->asoc = &stcb->asoc;
4868 				chk->rec.data.stream_seq = strq->next_sequence_send;
4869 				chk->rec.data.stream_number = sp->stream;
4870 				chk->rec.data.payloadtype = sp->ppid;
4871 				chk->rec.data.context = sp->context;
4872 				chk->flags = sp->act_flags;
4873 				if (sp->net)
4874 					chk->whoTo = sp->net;
4875 				else
4876 					chk->whoTo = stcb->asoc.primary_destination;
4877 				atomic_add_int(&chk->whoTo->ref_count, 1);
4878 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4879 				stcb->asoc.pr_sctp_cnt++;
4880 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4881 				stcb->asoc.sent_queue_cnt++;
4882 				stcb->asoc.pr_sctp_cnt++;
4883 			} else {
4884 				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4885 			}
4886 			strq->next_sequence_send++;
4887 	oh_well:
4888 			if (sp->data) {
4889 				/*
4890 				 * Pull any data to free up the SB and allow
4891 				 * sender to "add more" while we will throw
4892 				 * away :-)
4893 				 */
4894 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4895 				ret_sz += sp->length;
4896 				do_wakeup_routine = 1;
4897 				sp->some_taken = 1;
4898 				sctp_m_freem(sp->data);
4899 				sp->data = NULL;
4900 				sp->tail_mbuf = NULL;
4901 				sp->length = 0;
4902 			}
4903 		}
4904 		SCTP_TCB_SEND_UNLOCK(stcb);
4905 	}
4906 	if (do_wakeup_routine) {
4907 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4908 		struct socket *so;
4909 
4910 		so = SCTP_INP_SO(stcb->sctp_ep);
4911 		if (!so_locked) {
4912 			atomic_add_int(&stcb->asoc.refcnt, 1);
4913 			SCTP_TCB_UNLOCK(stcb);
4914 			SCTP_SOCKET_LOCK(so, 1);
4915 			SCTP_TCB_LOCK(stcb);
4916 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4917 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4918 				/* assoc was freed while we were unlocked */
4919 				SCTP_SOCKET_UNLOCK(so, 1);
4920 				return (ret_sz);
4921 			}
4922 		}
4923 #endif
4924 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4925 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4926 		if (!so_locked) {
4927 			SCTP_SOCKET_UNLOCK(so, 1);
4928 		}
4929 #endif
4930 	}
4931 	return (ret_sz);
4932 }
4933 
4934 /*
4935  * checks to see if the given address, sa, is one that is currently known by
4936  * the kernel note: can't distinguish the same address on multiple interfaces
4937  * and doesn't handle multiple addresses with different zone/scope id's note:
4938  * ifa_ifwithaddr() compares the entire sockaddr struct
4939  */
4940 struct sctp_ifa *
4941 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4942     int holds_lock)
4943 {
4944 	struct sctp_laddr *laddr;
4945 
4946 	if (holds_lock == 0) {
4947 		SCTP_INP_RLOCK(inp);
4948 	}
4949 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4950 		if (laddr->ifa == NULL)
4951 			continue;
4952 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4953 			continue;
4954 #ifdef INET
4955 		if (addr->sa_family == AF_INET) {
4956 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4957 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4958 				/* found him. */
4959 				if (holds_lock == 0) {
4960 					SCTP_INP_RUNLOCK(inp);
4961 				}
4962 				return (laddr->ifa);
4963 				break;
4964 			}
4965 		}
4966 #endif
4967 #ifdef INET6
4968 		if (addr->sa_family == AF_INET6) {
4969 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4970 			    &laddr->ifa->address.sin6)) {
4971 				/* found him. */
4972 				if (holds_lock == 0) {
4973 					SCTP_INP_RUNLOCK(inp);
4974 				}
4975 				return (laddr->ifa);
4976 				break;
4977 			}
4978 		}
4979 #endif
4980 	}
4981 	if (holds_lock == 0) {
4982 		SCTP_INP_RUNLOCK(inp);
4983 	}
4984 	return (NULL);
4985 }
4986 
/*
 * Fold a socket address down to a 32-bit hash value.
 * IPv4: the address xor'ed with its own upper half.
 * IPv6: the sum of the four 32-bit words, similarly folded.
 * Unknown families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
#ifdef INET
	if (addr->sa_family == AF_INET) {
		struct sockaddr_in *a4;
		uint32_t v;

		a4 = (struct sockaddr_in *)addr;
		v = a4->sin_addr.s_addr;
		return (v ^ (v >> 16));
	}
#endif
#ifdef INET6
	if (addr->sa_family == AF_INET6) {
		struct sockaddr_in6 *a6;
		uint32_t sum;

		a6 = (struct sockaddr_in6 *)addr;
		sum = a6->sin6_addr.s6_addr32[0] +
		    a6->sin6_addr.s6_addr32[1] +
		    a6->sin6_addr.s6_addr32[2] +
		    a6->sin6_addr.s6_addr32[3];
		return (sum ^ (sum >> 16));
	}
#endif
	return (0);
}
5020 
5021 struct sctp_ifa *
5022 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5023 {
5024 	struct sctp_ifa *sctp_ifap;
5025 	struct sctp_vrf *vrf;
5026 	struct sctp_ifalist *hash_head;
5027 	uint32_t hash_of_addr;
5028 
5029 	if (holds_lock == 0)
5030 		SCTP_IPI_ADDR_RLOCK();
5031 
5032 	vrf = sctp_find_vrf(vrf_id);
5033 	if (vrf == NULL) {
5034 stage_right:
5035 		if (holds_lock == 0)
5036 			SCTP_IPI_ADDR_RUNLOCK();
5037 		return (NULL);
5038 	}
5039 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5040 
5041 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5042 	if (hash_head == NULL) {
5043 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5044 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5045 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5046 		sctp_print_address(addr);
5047 		SCTP_PRINTF("No such bucket for address\n");
5048 		if (holds_lock == 0)
5049 			SCTP_IPI_ADDR_RUNLOCK();
5050 
5051 		return (NULL);
5052 	}
5053 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5054 		if (sctp_ifap == NULL) {
5055 #ifdef INVARIANTS
5056 			panic("Huh LIST_FOREACH corrupt");
5057 			goto stage_right;
5058 #else
5059 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
5060 			goto stage_right;
5061 #endif
5062 		}
5063 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5064 			continue;
5065 #ifdef INET
5066 		if (addr->sa_family == AF_INET) {
5067 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5068 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5069 				/* found him. */
5070 				if (holds_lock == 0)
5071 					SCTP_IPI_ADDR_RUNLOCK();
5072 				return (sctp_ifap);
5073 				break;
5074 			}
5075 		}
5076 #endif
5077 #ifdef INET6
5078 		if (addr->sa_family == AF_INET6) {
5079 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5080 			    &sctp_ifap->address.sin6)) {
5081 				/* found him. */
5082 				if (holds_lock == 0)
5083 					SCTP_IPI_ADDR_RUNLOCK();
5084 				return (sctp_ifap);
5085 				break;
5086 			}
5087 		}
5088 #endif
5089 	}
5090 	if (holds_lock == 0)
5091 		SCTP_IPI_ADDR_RUNLOCK();
5092 	return (NULL);
5093 }
5094 
/*
 * Called after the user has pulled data off the socket: decide whether
 * the peer should be told that our receive window has re-opened, and if
 * so send a window-update SACK (plus any pending output) right away.
 *
 * Parameters:
 *   stcb        - association; may be NULL (no-op then).
 *   freed_so_far- in/out: bytes freed by the reader since the last
 *                 update; accumulated into the tcb and zeroed here.
 *   hold_rlock  - non-zero if the caller holds the INP read-queue lock;
 *                 it is dropped around the SACK send and re-taken.
 *   rwnd_req    - threshold: only send an update once the window has
 *                 grown by at least this many bytes.
 *
 * Locking: takes a refcnt on the association and an INP ref for the
 * duration; acquires the TCB lock only on the send path.  Re-checks
 * SCTP_STATE_ABOUT_TO_BE_FREED both before and after taking the TCB
 * lock to close the race with association teardown.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the association so it cannot be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed byte count into the running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		/* Window shrank: nothing worth reporting. */
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window grew enough: send a window-update SACK now. */
		if (hold_rlock) {
			/* Drop the read lock before taking the TCB lock. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Restore the read lock if we dropped it for the caller. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5176 
5177 int
5178 sctp_sorecvmsg(struct socket *so,
5179     struct uio *uio,
5180     struct mbuf **mp,
5181     struct sockaddr *from,
5182     int fromlen,
5183     int *msg_flags,
5184     struct sctp_sndrcvinfo *sinfo,
5185     int filling_sinfo)
5186 {
5187 	/*
5188 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5189 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5190 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5191 	 * On the way out we may send out any combination of:
5192 	 * MSG_NOTIFICATION MSG_EOR
5193 	 *
5194 	 */
5195 	struct sctp_inpcb *inp = NULL;
5196 	int my_len = 0;
5197 	int cp_len = 0, error = 0;
5198 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5199 	struct mbuf *m = NULL;
5200 	struct sctp_tcb *stcb = NULL;
5201 	int wakeup_read_socket = 0;
5202 	int freecnt_applied = 0;
5203 	int out_flags = 0, in_flags = 0;
5204 	int block_allowed = 1;
5205 	uint32_t freed_so_far = 0;
5206 	uint32_t copied_so_far = 0;
5207 	int in_eeor_mode = 0;
5208 	int no_rcv_needed = 0;
5209 	uint32_t rwnd_req = 0;
5210 	int hold_sblock = 0;
5211 	int hold_rlock = 0;
5212 	int slen = 0;
5213 	uint32_t held_length = 0;
5214 	int sockbuf_lock = 0;
5215 
5216 	if (uio == NULL) {
5217 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5218 		return (EINVAL);
5219 	}
5220 	if (msg_flags) {
5221 		in_flags = *msg_flags;
5222 		if (in_flags & MSG_PEEK)
5223 			SCTP_STAT_INCR(sctps_read_peeks);
5224 	} else {
5225 		in_flags = 0;
5226 	}
5227 	slen = uio->uio_resid;
5228 
5229 	/* Pull in and set up our int flags */
5230 	if (in_flags & MSG_OOB) {
5231 		/* Out of band's NOT supported */
5232 		return (EOPNOTSUPP);
5233 	}
5234 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5235 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5236 		return (EINVAL);
5237 	}
5238 	if ((in_flags & (MSG_DONTWAIT
5239 	    | MSG_NBIO
5240 	    )) ||
5241 	    SCTP_SO_IS_NBIO(so)) {
5242 		block_allowed = 0;
5243 	}
5244 	/* setup the endpoint */
5245 	inp = (struct sctp_inpcb *)so->so_pcb;
5246 	if (inp == NULL) {
5247 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5248 		return (EFAULT);
5249 	}
5250 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5251 	/* Must be at least a MTU's worth */
5252 	if (rwnd_req < SCTP_MIN_RWND)
5253 		rwnd_req = SCTP_MIN_RWND;
5254 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5255 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5256 		sctp_misc_ints(SCTP_SORECV_ENTER,
5257 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5258 	}
5259 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5260 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5261 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5262 	}
5263 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5264 	if (error) {
5265 		goto release_unlocked;
5266 	}
5267 	sockbuf_lock = 1;
5268 restart:
5269 
5270 
5271 restart_nosblocks:
5272 	if (hold_sblock == 0) {
5273 		SOCKBUF_LOCK(&so->so_rcv);
5274 		hold_sblock = 1;
5275 	}
5276 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5277 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5278 		goto out;
5279 	}
5280 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5281 		if (so->so_error) {
5282 			error = so->so_error;
5283 			if ((in_flags & MSG_PEEK) == 0)
5284 				so->so_error = 0;
5285 			goto out;
5286 		} else {
5287 			if (so->so_rcv.sb_cc == 0) {
5288 				/* indicate EOF */
5289 				error = 0;
5290 				goto out;
5291 			}
5292 		}
5293 	}
5294 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5295 		/* we need to wait for data */
5296 		if ((so->so_rcv.sb_cc == 0) &&
5297 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5298 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5299 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5300 				/*
5301 				 * For active open side clear flags for
5302 				 * re-use passive open is blocked by
5303 				 * connect.
5304 				 */
5305 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5306 					/*
5307 					 * You were aborted, passive side
5308 					 * always hits here
5309 					 */
5310 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5311 					error = ECONNRESET;
5312 				}
5313 				so->so_state &= ~(SS_ISCONNECTING |
5314 				    SS_ISDISCONNECTING |
5315 				    SS_ISCONFIRMING |
5316 				    SS_ISCONNECTED);
5317 				if (error == 0) {
5318 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5319 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5320 						error = ENOTCONN;
5321 					}
5322 				}
5323 				goto out;
5324 			}
5325 		}
5326 		error = sbwait(&so->so_rcv);
5327 		if (error) {
5328 			goto out;
5329 		}
5330 		held_length = 0;
5331 		goto restart_nosblocks;
5332 	} else if (so->so_rcv.sb_cc == 0) {
5333 		if (so->so_error) {
5334 			error = so->so_error;
5335 			if ((in_flags & MSG_PEEK) == 0)
5336 				so->so_error = 0;
5337 		} else {
5338 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5339 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5340 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5341 					/*
5342 					 * For active open side clear flags
5343 					 * for re-use passive open is
5344 					 * blocked by connect.
5345 					 */
5346 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5347 						/*
5348 						 * You were aborted, passive
5349 						 * side always hits here
5350 						 */
5351 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5352 						error = ECONNRESET;
5353 					}
5354 					so->so_state &= ~(SS_ISCONNECTING |
5355 					    SS_ISDISCONNECTING |
5356 					    SS_ISCONFIRMING |
5357 					    SS_ISCONNECTED);
5358 					if (error == 0) {
5359 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5360 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5361 							error = ENOTCONN;
5362 						}
5363 					}
5364 					goto out;
5365 				}
5366 			}
5367 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5368 			error = EWOULDBLOCK;
5369 		}
5370 		goto out;
5371 	}
5372 	if (hold_sblock == 1) {
5373 		SOCKBUF_UNLOCK(&so->so_rcv);
5374 		hold_sblock = 0;
5375 	}
5376 	/* we possibly have data we can read */
5377 	/* sa_ignore FREED_MEMORY */
5378 	control = TAILQ_FIRST(&inp->read_queue);
5379 	if (control == NULL) {
5380 		/*
5381 		 * This could be happening since the appender did the
5382 		 * increment but as not yet did the tailq insert onto the
5383 		 * read_queue
5384 		 */
5385 		if (hold_rlock == 0) {
5386 			SCTP_INP_READ_LOCK(inp);
5387 		}
5388 		control = TAILQ_FIRST(&inp->read_queue);
5389 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5390 #ifdef INVARIANTS
5391 			panic("Huh, its non zero and nothing on control?");
5392 #endif
5393 			so->so_rcv.sb_cc = 0;
5394 		}
5395 		SCTP_INP_READ_UNLOCK(inp);
5396 		hold_rlock = 0;
5397 		goto restart;
5398 	}
5399 	if ((control->length == 0) &&
5400 	    (control->do_not_ref_stcb)) {
5401 		/*
5402 		 * Clean up code for freeing assoc that left behind a
5403 		 * pdapi.. maybe a peer in EEOR that just closed after
5404 		 * sending and never indicated a EOR.
5405 		 */
5406 		if (hold_rlock == 0) {
5407 			hold_rlock = 1;
5408 			SCTP_INP_READ_LOCK(inp);
5409 		}
5410 		control->held_length = 0;
5411 		if (control->data) {
5412 			/* Hmm there is data here .. fix */
5413 			struct mbuf *m_tmp;
5414 			int cnt = 0;
5415 
5416 			m_tmp = control->data;
5417 			while (m_tmp) {
5418 				cnt += SCTP_BUF_LEN(m_tmp);
5419 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5420 					control->tail_mbuf = m_tmp;
5421 					control->end_added = 1;
5422 				}
5423 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5424 			}
5425 			control->length = cnt;
5426 		} else {
5427 			/* remove it */
5428 			TAILQ_REMOVE(&inp->read_queue, control, next);
5429 			/* Add back any hiddend data */
5430 			sctp_free_remote_addr(control->whoFrom);
5431 			sctp_free_a_readq(stcb, control);
5432 		}
5433 		if (hold_rlock) {
5434 			hold_rlock = 0;
5435 			SCTP_INP_READ_UNLOCK(inp);
5436 		}
5437 		goto restart;
5438 	}
5439 	if ((control->length == 0) &&
5440 	    (control->end_added == 1)) {
5441 		/*
5442 		 * Do we also need to check for (control->pdapi_aborted ==
5443 		 * 1)?
5444 		 */
5445 		if (hold_rlock == 0) {
5446 			hold_rlock = 1;
5447 			SCTP_INP_READ_LOCK(inp);
5448 		}
5449 		TAILQ_REMOVE(&inp->read_queue, control, next);
5450 		if (control->data) {
5451 #ifdef INVARIANTS
5452 			panic("control->data not null but control->length == 0");
5453 #else
5454 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5455 			sctp_m_freem(control->data);
5456 			control->data = NULL;
5457 #endif
5458 		}
5459 		if (control->aux_data) {
5460 			sctp_m_free(control->aux_data);
5461 			control->aux_data = NULL;
5462 		}
5463 		sctp_free_remote_addr(control->whoFrom);
5464 		sctp_free_a_readq(stcb, control);
5465 		if (hold_rlock) {
5466 			hold_rlock = 0;
5467 			SCTP_INP_READ_UNLOCK(inp);
5468 		}
5469 		goto restart;
5470 	}
5471 	if (control->length == 0) {
5472 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5473 		    (filling_sinfo)) {
5474 			/* find a more suitable one then this */
5475 			ctl = TAILQ_NEXT(control, next);
5476 			while (ctl) {
5477 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5478 				    (ctl->some_taken ||
5479 				    (ctl->spec_flags & M_NOTIFICATION) ||
5480 				    ((ctl->do_not_ref_stcb == 0) &&
5481 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5482 				    ) {
5483 					/*-
5484 					 * If we have a different TCB next, and there is data
5485 					 * present. If we have already taken some (pdapi), OR we can
5486 					 * ref the tcb and no delivery as started on this stream, we
5487 					 * take it. Note we allow a notification on a different
5488 					 * assoc to be delivered..
5489 					 */
5490 					control = ctl;
5491 					goto found_one;
5492 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5493 					    (ctl->length) &&
5494 					    ((ctl->some_taken) ||
5495 					    ((ctl->do_not_ref_stcb == 0) &&
5496 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5497 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5498 					/*-
5499 					 * If we have the same tcb, and there is data present, and we
5500 					 * have the strm interleave feature present. Then if we have
5501 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5502 					 * not started a delivery for this stream, we can take it.
5503 					 * Note we do NOT allow a notificaiton on the same assoc to
5504 					 * be delivered.
5505 					 */
5506 					control = ctl;
5507 					goto found_one;
5508 				}
5509 				ctl = TAILQ_NEXT(ctl, next);
5510 			}
5511 		}
5512 		/*
5513 		 * if we reach here, not suitable replacement is available
5514 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5515 		 * into the our held count, and its time to sleep again.
5516 		 */
5517 		held_length = so->so_rcv.sb_cc;
5518 		control->held_length = so->so_rcv.sb_cc;
5519 		goto restart;
5520 	}
5521 	/* Clear the held length since there is something to read */
5522 	control->held_length = 0;
5523 	if (hold_rlock) {
5524 		SCTP_INP_READ_UNLOCK(inp);
5525 		hold_rlock = 0;
5526 	}
5527 found_one:
5528 	/*
5529 	 * If we reach here, control has a some data for us to read off.
5530 	 * Note that stcb COULD be NULL.
5531 	 */
5532 	control->some_taken++;
5533 	if (hold_sblock) {
5534 		SOCKBUF_UNLOCK(&so->so_rcv);
5535 		hold_sblock = 0;
5536 	}
5537 	stcb = control->stcb;
5538 	if (stcb) {
5539 		if ((control->do_not_ref_stcb == 0) &&
5540 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5541 			if (freecnt_applied == 0)
5542 				stcb = NULL;
5543 		} else if (control->do_not_ref_stcb == 0) {
5544 			/* you can't free it on me please */
5545 			/*
5546 			 * The lock on the socket buffer protects us so the
5547 			 * free code will stop. But since we used the
5548 			 * socketbuf lock and the sender uses the tcb_lock
5549 			 * to increment, we need to use the atomic add to
5550 			 * the refcnt
5551 			 */
5552 			if (freecnt_applied) {
5553 #ifdef INVARIANTS
5554 				panic("refcnt already incremented");
5555 #else
5556 				SCTP_PRINTF("refcnt already incremented?\n");
5557 #endif
5558 			} else {
5559 				atomic_add_int(&stcb->asoc.refcnt, 1);
5560 				freecnt_applied = 1;
5561 			}
5562 			/*
5563 			 * Setup to remember how much we have not yet told
5564 			 * the peer our rwnd has opened up. Note we grab the
5565 			 * value from the tcb from last time. Note too that
5566 			 * sack sending clears this when a sack is sent,
5567 			 * which is fine. Once we hit the rwnd_req, we then
5568 			 * will go to the sctp_user_rcvd() that will not
5569 			 * lock until it KNOWs it MUST send a WUP-SACK.
5570 			 */
5571 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5572 			stcb->freed_by_sorcv_sincelast = 0;
5573 		}
5574 	}
5575 	if (stcb &&
5576 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5577 	    control->do_not_ref_stcb == 0) {
5578 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5579 	}
5580 	/* First lets get off the sinfo and sockaddr info */
5581 	if ((sinfo) && filling_sinfo) {
5582 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5583 		nxt = TAILQ_NEXT(control, next);
5584 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5585 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5586 			struct sctp_extrcvinfo *s_extra;
5587 
5588 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5589 			if ((nxt) &&
5590 			    (nxt->length)) {
5591 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5592 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5593 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5594 				}
5595 				if (nxt->spec_flags & M_NOTIFICATION) {
5596 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5597 				}
5598 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5599 				s_extra->sreinfo_next_length = nxt->length;
5600 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5601 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5602 				if (nxt->tail_mbuf != NULL) {
5603 					if (nxt->end_added) {
5604 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5605 					}
5606 				}
5607 			} else {
5608 				/*
5609 				 * we explicitly 0 this, since the memcpy
5610 				 * got some other things beyond the older
5611 				 * sinfo_ that is on the control's structure
5612 				 * :-D
5613 				 */
5614 				nxt = NULL;
5615 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5616 				s_extra->sreinfo_next_aid = 0;
5617 				s_extra->sreinfo_next_length = 0;
5618 				s_extra->sreinfo_next_ppid = 0;
5619 				s_extra->sreinfo_next_stream = 0;
5620 			}
5621 		}
5622 		/*
5623 		 * update off the real current cum-ack, if we have an stcb.
5624 		 */
5625 		if ((control->do_not_ref_stcb == 0) && stcb)
5626 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5627 		/*
5628 		 * mask off the high bits, we keep the actual chunk bits in
5629 		 * there.
5630 		 */
5631 		sinfo->sinfo_flags &= 0x00ff;
5632 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5633 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5634 		}
5635 	}
5636 #ifdef SCTP_ASOCLOG_OF_TSNS
5637 	{
5638 		int index, newindex;
5639 		struct sctp_pcbtsn_rlog *entry;
5640 
5641 		do {
5642 			index = inp->readlog_index;
5643 			newindex = index + 1;
5644 			if (newindex >= SCTP_READ_LOG_SIZE) {
5645 				newindex = 0;
5646 			}
5647 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5648 		entry = &inp->readlog[index];
5649 		entry->vtag = control->sinfo_assoc_id;
5650 		entry->strm = control->sinfo_stream;
5651 		entry->seq = control->sinfo_ssn;
5652 		entry->sz = control->length;
5653 		entry->flgs = control->sinfo_flags;
5654 	}
5655 #endif
5656 	if (fromlen && from) {
5657 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len);
5658 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5659 #ifdef INET6
5660 		case AF_INET6:
5661 			((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5662 			break;
5663 #endif
5664 #ifdef INET
5665 		case AF_INET:
5666 			((struct sockaddr_in *)from)->sin_port = control->port_from;
5667 			break;
5668 #endif
5669 		default:
5670 			break;
5671 		}
5672 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5673 
5674 #if defined(INET) && defined(INET6)
5675 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5676 		    (from->sa_family == AF_INET) &&
5677 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5678 			struct sockaddr_in *sin;
5679 			struct sockaddr_in6 sin6;
5680 
5681 			sin = (struct sockaddr_in *)from;
5682 			bzero(&sin6, sizeof(sin6));
5683 			sin6.sin6_family = AF_INET6;
5684 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5685 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5686 			bcopy(&sin->sin_addr,
5687 			    &sin6.sin6_addr.s6_addr32[3],
5688 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5689 			sin6.sin6_port = sin->sin_port;
5690 			memcpy(from, &sin6, sizeof(struct sockaddr_in6));
5691 		}
5692 #endif
5693 #ifdef INET6
5694 		{
5695 			struct sockaddr_in6 lsa6, *from6;
5696 
5697 			from6 = (struct sockaddr_in6 *)from;
5698 			sctp_recover_scope_mac(from6, (&lsa6));
5699 		}
5700 #endif
5701 	}
5702 	/* now copy out what data we can */
5703 	if (mp == NULL) {
5704 		/* copy out each mbuf in the chain up to length */
5705 get_more_data:
5706 		m = control->data;
5707 		while (m) {
5708 			/* Move out all we can */
5709 			cp_len = (int)uio->uio_resid;
5710 			my_len = (int)SCTP_BUF_LEN(m);
5711 			if (cp_len > my_len) {
5712 				/* not enough in this buf */
5713 				cp_len = my_len;
5714 			}
5715 			if (hold_rlock) {
5716 				SCTP_INP_READ_UNLOCK(inp);
5717 				hold_rlock = 0;
5718 			}
5719 			if (cp_len > 0)
5720 				error = uiomove(mtod(m, char *), cp_len, uio);
5721 			/* re-read */
5722 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5723 				goto release;
5724 			}
5725 			if ((control->do_not_ref_stcb == 0) && stcb &&
5726 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5727 				no_rcv_needed = 1;
5728 			}
5729 			if (error) {
5730 				/* error we are out of here */
5731 				goto release;
5732 			}
5733 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5734 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5735 			    ((control->end_added == 0) ||
5736 			    (control->end_added &&
5737 			    (TAILQ_NEXT(control, next) == NULL)))
5738 			    ) {
5739 				SCTP_INP_READ_LOCK(inp);
5740 				hold_rlock = 1;
5741 			}
5742 			if (cp_len == SCTP_BUF_LEN(m)) {
5743 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5744 				    (control->end_added)) {
5745 					out_flags |= MSG_EOR;
5746 					if ((control->do_not_ref_stcb == 0) &&
5747 					    (control->stcb != NULL) &&
5748 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5749 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5750 				}
5751 				if (control->spec_flags & M_NOTIFICATION) {
5752 					out_flags |= MSG_NOTIFICATION;
5753 				}
5754 				/* we ate up the mbuf */
5755 				if (in_flags & MSG_PEEK) {
5756 					/* just looking */
5757 					m = SCTP_BUF_NEXT(m);
5758 					copied_so_far += cp_len;
5759 				} else {
5760 					/* dispose of the mbuf */
5761 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5762 						sctp_sblog(&so->so_rcv,
5763 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5764 					}
5765 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5766 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5767 						sctp_sblog(&so->so_rcv,
5768 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5769 					}
5770 					copied_so_far += cp_len;
5771 					freed_so_far += cp_len;
5772 					freed_so_far += MSIZE;
5773 					atomic_subtract_int(&control->length, cp_len);
5774 					control->data = sctp_m_free(m);
5775 					m = control->data;
5776 					/*
5777 					 * been through it all, must hold sb
5778 					 * lock ok to null tail
5779 					 */
5780 					if (control->data == NULL) {
5781 #ifdef INVARIANTS
5782 						if ((control->end_added == 0) ||
5783 						    (TAILQ_NEXT(control, next) == NULL)) {
5784 							/*
5785 							 * If the end is not
5786 							 * added, OR the
5787 							 * next is NOT null
5788 							 * we MUST have the
5789 							 * lock.
5790 							 */
5791 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5792 								panic("Hmm we don't own the lock?");
5793 							}
5794 						}
5795 #endif
5796 						control->tail_mbuf = NULL;
5797 #ifdef INVARIANTS
5798 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5799 							panic("end_added, nothing left and no MSG_EOR");
5800 						}
5801 #endif
5802 					}
5803 				}
5804 			} else {
5805 				/* Do we need to trim the mbuf? */
5806 				if (control->spec_flags & M_NOTIFICATION) {
5807 					out_flags |= MSG_NOTIFICATION;
5808 				}
5809 				if ((in_flags & MSG_PEEK) == 0) {
5810 					SCTP_BUF_RESV_UF(m, cp_len);
5811 					SCTP_BUF_LEN(m) -= cp_len;
5812 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5813 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5814 					}
5815 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5816 					if ((control->do_not_ref_stcb == 0) &&
5817 					    stcb) {
5818 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5819 					}
5820 					copied_so_far += cp_len;
5821 					freed_so_far += cp_len;
5822 					freed_so_far += MSIZE;
5823 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5824 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5825 						    SCTP_LOG_SBRESULT, 0);
5826 					}
5827 					atomic_subtract_int(&control->length, cp_len);
5828 				} else {
5829 					copied_so_far += cp_len;
5830 				}
5831 			}
5832 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5833 				break;
5834 			}
5835 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5836 			    (control->do_not_ref_stcb == 0) &&
5837 			    (freed_so_far >= rwnd_req)) {
5838 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5839 			}
5840 		}		/* end while(m) */
5841 		/*
5842 		 * At this point we have looked at it all and we either have
5843 		 * a MSG_EOR/or read all the user wants... <OR>
5844 		 * control->length == 0.
5845 		 */
5846 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5847 			/* we are done with this control */
5848 			if (control->length == 0) {
5849 				if (control->data) {
5850 #ifdef INVARIANTS
5851 					panic("control->data not null at read eor?");
5852 #else
5853 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5854 					sctp_m_freem(control->data);
5855 					control->data = NULL;
5856 #endif
5857 				}
5858 		done_with_control:
5859 				if (TAILQ_NEXT(control, next) == NULL) {
5860 					/*
5861 					 * If we don't have a next we need a
5862 					 * lock, if there is a next
5863 					 * interrupt is filling ahead of us
5864 					 * and we don't need a lock to
5865 					 * remove this guy (which is the
5866 					 * head of the queue).
5867 					 */
5868 					if (hold_rlock == 0) {
5869 						SCTP_INP_READ_LOCK(inp);
5870 						hold_rlock = 1;
5871 					}
5872 				}
5873 				TAILQ_REMOVE(&inp->read_queue, control, next);
5874 				/* Add back any hiddend data */
5875 				if (control->held_length) {
5876 					held_length = 0;
5877 					control->held_length = 0;
5878 					wakeup_read_socket = 1;
5879 				}
5880 				if (control->aux_data) {
5881 					sctp_m_free(control->aux_data);
5882 					control->aux_data = NULL;
5883 				}
5884 				no_rcv_needed = control->do_not_ref_stcb;
5885 				sctp_free_remote_addr(control->whoFrom);
5886 				control->data = NULL;
5887 				sctp_free_a_readq(stcb, control);
5888 				control = NULL;
5889 				if ((freed_so_far >= rwnd_req) &&
5890 				    (no_rcv_needed == 0))
5891 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5892 
5893 			} else {
5894 				/*
5895 				 * The user did not read all of this
5896 				 * message, turn off the returned MSG_EOR
5897 				 * since we are leaving more behind on the
5898 				 * control to read.
5899 				 */
5900 #ifdef INVARIANTS
5901 				if (control->end_added &&
5902 				    (control->data == NULL) &&
5903 				    (control->tail_mbuf == NULL)) {
5904 					panic("Gak, control->length is corrupt?");
5905 				}
5906 #endif
5907 				no_rcv_needed = control->do_not_ref_stcb;
5908 				out_flags &= ~MSG_EOR;
5909 			}
5910 		}
5911 		if (out_flags & MSG_EOR) {
5912 			goto release;
5913 		}
5914 		if ((uio->uio_resid == 0) ||
5915 		    ((in_eeor_mode) &&
5916 		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5917 			goto release;
5918 		}
5919 		/*
5920 		 * If I hit here the receiver wants more and this message is
5921 		 * NOT done (pd-api). So two questions. Can we block? if not
5922 		 * we are done. Did the user NOT set MSG_WAITALL?
5923 		 */
5924 		if (block_allowed == 0) {
5925 			goto release;
5926 		}
5927 		/*
5928 		 * We need to wait for more data a few things: - We don't
5929 		 * sbunlock() so we don't get someone else reading. - We
5930 		 * must be sure to account for the case where what is added
5931 		 * is NOT to our control when we wakeup.
5932 		 */
5933 
5934 		/*
5935 		 * Do we need to tell the transport a rwnd update might be
5936 		 * needed before we go to sleep?
5937 		 */
5938 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5939 		    ((freed_so_far >= rwnd_req) &&
5940 		    (control->do_not_ref_stcb == 0) &&
5941 		    (no_rcv_needed == 0))) {
5942 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5943 		}
5944 wait_some_more:
5945 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5946 			goto release;
5947 		}
5948 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5949 			goto release;
5950 
5951 		if (hold_rlock == 1) {
5952 			SCTP_INP_READ_UNLOCK(inp);
5953 			hold_rlock = 0;
5954 		}
5955 		if (hold_sblock == 0) {
5956 			SOCKBUF_LOCK(&so->so_rcv);
5957 			hold_sblock = 1;
5958 		}
5959 		if ((copied_so_far) && (control->length == 0) &&
5960 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5961 			goto release;
5962 		}
5963 		if (so->so_rcv.sb_cc <= control->held_length) {
5964 			error = sbwait(&so->so_rcv);
5965 			if (error) {
5966 				goto release;
5967 			}
5968 			control->held_length = 0;
5969 		}
5970 		if (hold_sblock) {
5971 			SOCKBUF_UNLOCK(&so->so_rcv);
5972 			hold_sblock = 0;
5973 		}
5974 		if (control->length == 0) {
5975 			/* still nothing here */
5976 			if (control->end_added == 1) {
5977 				/* he aborted, or is done i.e.did a shutdown */
5978 				out_flags |= MSG_EOR;
5979 				if (control->pdapi_aborted) {
5980 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5981 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5982 
5983 					out_flags |= MSG_TRUNC;
5984 				} else {
5985 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5986 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5987 				}
5988 				goto done_with_control;
5989 			}
5990 			if (so->so_rcv.sb_cc > held_length) {
5991 				control->held_length = so->so_rcv.sb_cc;
5992 				held_length = 0;
5993 			}
5994 			goto wait_some_more;
5995 		} else if (control->data == NULL) {
5996 			/*
5997 			 * we must re-sync since data is probably being
5998 			 * added
5999 			 */
6000 			SCTP_INP_READ_LOCK(inp);
6001 			if ((control->length > 0) && (control->data == NULL)) {
6002 				/*
6003 				 * big trouble.. we have the lock and its
6004 				 * corrupt?
6005 				 */
6006 #ifdef INVARIANTS
6007 				panic("Impossible data==NULL length !=0");
6008 #endif
6009 				out_flags |= MSG_EOR;
6010 				out_flags |= MSG_TRUNC;
6011 				control->length = 0;
6012 				SCTP_INP_READ_UNLOCK(inp);
6013 				goto done_with_control;
6014 			}
6015 			SCTP_INP_READ_UNLOCK(inp);
6016 			/* We will fall around to get more data */
6017 		}
6018 		goto get_more_data;
6019 	} else {
6020 		/*-
6021 		 * Give caller back the mbuf chain,
6022 		 * store in uio_resid the length
6023 		 */
6024 		wakeup_read_socket = 0;
6025 		if ((control->end_added == 0) ||
6026 		    (TAILQ_NEXT(control, next) == NULL)) {
6027 			/* Need to get rlock */
6028 			if (hold_rlock == 0) {
6029 				SCTP_INP_READ_LOCK(inp);
6030 				hold_rlock = 1;
6031 			}
6032 		}
6033 		if (control->end_added) {
6034 			out_flags |= MSG_EOR;
6035 			if ((control->do_not_ref_stcb == 0) &&
6036 			    (control->stcb != NULL) &&
6037 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6038 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6039 		}
6040 		if (control->spec_flags & M_NOTIFICATION) {
6041 			out_flags |= MSG_NOTIFICATION;
6042 		}
6043 		uio->uio_resid = control->length;
6044 		*mp = control->data;
6045 		m = control->data;
6046 		while (m) {
6047 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6048 				sctp_sblog(&so->so_rcv,
6049 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6050 			}
6051 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6052 			freed_so_far += SCTP_BUF_LEN(m);
6053 			freed_so_far += MSIZE;
6054 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6055 				sctp_sblog(&so->so_rcv,
6056 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6057 			}
6058 			m = SCTP_BUF_NEXT(m);
6059 		}
6060 		control->data = control->tail_mbuf = NULL;
6061 		control->length = 0;
6062 		if (out_flags & MSG_EOR) {
6063 			/* Done with this control */
6064 			goto done_with_control;
6065 		}
6066 	}
6067 release:
6068 	if (hold_rlock == 1) {
6069 		SCTP_INP_READ_UNLOCK(inp);
6070 		hold_rlock = 0;
6071 	}
6072 	if (hold_sblock == 1) {
6073 		SOCKBUF_UNLOCK(&so->so_rcv);
6074 		hold_sblock = 0;
6075 	}
6076 	sbunlock(&so->so_rcv);
6077 	sockbuf_lock = 0;
6078 
6079 release_unlocked:
6080 	if (hold_sblock) {
6081 		SOCKBUF_UNLOCK(&so->so_rcv);
6082 		hold_sblock = 0;
6083 	}
6084 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6085 		if ((freed_so_far >= rwnd_req) &&
6086 		    (control && (control->do_not_ref_stcb == 0)) &&
6087 		    (no_rcv_needed == 0))
6088 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6089 	}
6090 out:
6091 	if (msg_flags) {
6092 		*msg_flags = out_flags;
6093 	}
6094 	if (((out_flags & MSG_EOR) == 0) &&
6095 	    ((in_flags & MSG_PEEK) == 0) &&
6096 	    (sinfo) &&
6097 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6098 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6099 		struct sctp_extrcvinfo *s_extra;
6100 
6101 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6102 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6103 	}
6104 	if (hold_rlock == 1) {
6105 		SCTP_INP_READ_UNLOCK(inp);
6106 	}
6107 	if (hold_sblock) {
6108 		SOCKBUF_UNLOCK(&so->so_rcv);
6109 	}
6110 	if (sockbuf_lock) {
6111 		sbunlock(&so->so_rcv);
6112 	}
6113 	if (freecnt_applied) {
6114 		/*
6115 		 * The lock on the socket buffer protects us so the free
6116 		 * code will stop. But since we used the socketbuf lock and
6117 		 * the sender uses the tcb_lock to increment, we need to use
6118 		 * the atomic add to the refcnt.
6119 		 */
6120 		if (stcb == NULL) {
6121 #ifdef INVARIANTS
6122 			panic("stcb for refcnt has gone NULL?");
6123 			goto stage_left;
6124 #else
6125 			goto stage_left;
6126 #endif
6127 		}
6128 		atomic_add_int(&stcb->asoc.refcnt, -1);
6129 		/* Save the value back for next time */
6130 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6131 	}
6132 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6133 		if (stcb) {
6134 			sctp_misc_ints(SCTP_SORECV_DONE,
6135 			    freed_so_far,
6136 			    ((uio) ? (slen - uio->uio_resid) : slen),
6137 			    stcb->asoc.my_rwnd,
6138 			    so->so_rcv.sb_cc);
6139 		} else {
6140 			sctp_misc_ints(SCTP_SORECV_DONE,
6141 			    freed_so_far,
6142 			    ((uio) ? (slen - uio->uio_resid) : slen),
6143 			    0,
6144 			    so->so_rcv.sb_cc);
6145 		}
6146 	}
6147 stage_left:
6148 	if (wakeup_read_socket) {
6149 		sctp_sorwakeup(inp, so);
6150 	}
6151 	return (error);
6152 }
6153 
6154 
6155 #ifdef SCTP_MBUF_LOGGING
6156 struct mbuf *
6157 sctp_m_free(struct mbuf *m)
6158 {
6159 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6160 		if (SCTP_BUF_IS_EXTENDED(m)) {
6161 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6162 		}
6163 	}
6164 	return (m_free(m));
6165 }
6166 
6167 void
6168 sctp_m_freem(struct mbuf *mb)
6169 {
6170 	while (mb != NULL)
6171 		mb = sctp_m_free(mb);
6172 }
6173 
6174 #endif
6175 
/*
 * Given a local address, request a peer-set-primary for every
 * association that holds the address.  The request is queued as a
 * work item on the global address work queue and the ADDR_WQ timer
 * is kicked to process it asynchronously.  Returns 0 on success or
 * an errno value (EADDRNOTAVAIL, ENOMEM).
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	/* Look up the interface address; fail if it is not local. */
	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and init the wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* the work item holds its own reference on the ifa */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_WQ_ADDR_UNLOCK();
	/* kick the address work-queue timer to service the new entry */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}
6222 
6223 
6224 int
6225 sctp_soreceive(struct socket *so,
6226     struct sockaddr **psa,
6227     struct uio *uio,
6228     struct mbuf **mp0,
6229     struct mbuf **controlp,
6230     int *flagsp)
6231 {
6232 	int error, fromlen;
6233 	uint8_t sockbuf[256];
6234 	struct sockaddr *from;
6235 	struct sctp_extrcvinfo sinfo;
6236 	int filling_sinfo = 1;
6237 	struct sctp_inpcb *inp;
6238 
6239 	inp = (struct sctp_inpcb *)so->so_pcb;
6240 	/* pickup the assoc we are reading from */
6241 	if (inp == NULL) {
6242 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6243 		return (EINVAL);
6244 	}
6245 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6246 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6247 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6248 	    (controlp == NULL)) {
6249 		/* user does not want the sndrcv ctl */
6250 		filling_sinfo = 0;
6251 	}
6252 	if (psa) {
6253 		from = (struct sockaddr *)sockbuf;
6254 		fromlen = sizeof(sockbuf);
6255 		from->sa_len = 0;
6256 	} else {
6257 		from = NULL;
6258 		fromlen = 0;
6259 	}
6260 
6261 	if (filling_sinfo) {
6262 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6263 	}
6264 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6265 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6266 	if (controlp != NULL) {
6267 		/* copy back the sinfo in a CMSG format */
6268 		if (filling_sinfo)
6269 			*controlp = sctp_build_ctl_nchunk(inp,
6270 			    (struct sctp_sndrcvinfo *)&sinfo);
6271 		else
6272 			*controlp = NULL;
6273 	}
6274 	if (psa) {
6275 		/* copy back the address info */
6276 		if (from && from->sa_len) {
6277 			*psa = sodupsockaddr(from, M_NOWAIT);
6278 		} else {
6279 			*psa = NULL;
6280 		}
6281 	}
6282 	return (error);
6283 }
6284 
6285 
6286 
6287 
6288 
/*
 * Add the packed array of 'totaddr' sockaddrs starting at 'addr' to
 * the association 'stcb' as confirmed remote addresses.  Returns the
 * number of addresses added.  On a bad address or allocation failure
 * the association is freed, *error is set (EINVAL/ENOBUFS) and the
 * count added so far is returned.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* wildcard/broadcast/multicast are not valid peers */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* unspecified/multicast are not valid peers */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): an unknown family leaves 'incr'
			 * unchanged (0 for the first entry), so the same
			 * bytes are re-examined on the next iteration;
			 * callers are expected to have validated the list
			 * via sctp_connectx_helper_find() first - confirm.
			 */
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6364 
/*
 * Scan the packed sockaddr array 'addr' (*totaddr entries, at most
 * 'limit' bytes overall) supplied to a connectx() call.  Counts the
 * IPv4/IPv6 entries into *num_v4/*num_v6 and validates each sa_len.
 * If any address already maps to an association, that association is
 * returned; otherwise NULL.  On a malformed address *error and
 * *bad_addr are set and NULL is returned.  *totaddr is trimmed to
 * the number of entries actually examined.
 */
struct sctp_tcb *
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    int *totaddr, int *num_v4, int *num_v6, int *error,
    int limit, int *bad_addr)
{
	struct sockaddr *sa;
	struct sctp_tcb *stcb = NULL;
	size_t incr, at, i;

	at = incr = 0;
	sa = addr;

	*error = *num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	for (i = 0; i < (size_t)*totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			(*num_v4) += 1;
			incr = sizeof(struct sockaddr_in);
			if (sa->sa_len != incr) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
			break;
#endif
#ifdef INET6
		case AF_INET6:
			{
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)sa;
				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
					/* Must be non-mapped for connectx */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				(*num_v6) += 1;
				incr = sizeof(struct sockaddr_in6);
				if (sa->sa_len != incr) {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				break;
			}
#endif
		default:
			/* unknown family terminates the scan early */
			*totaddr = i;
			/* we are done */
			break;
		}
		if (i == (size_t)*totaddr) {
			break;
		}
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/*
			 * Already have or am bring up an association.
			 * NOTE(review): the INP ref taken just above is
			 * not dropped on this path - presumably released
			 * by the lookup or the caller; confirm.
			 */
			return (stcb);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		/*
		 * NOTE(review): 'at' is never advanced, so this bound
		 * compares only a single entry's size against 'limit' -
		 * it looks like it was meant to track the running byte
		 * offset; confirm against callers.
		 */
		if ((at + incr) > (size_t)limit) {
			*totaddr = i;
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return ((struct sctp_tcb *)NULL);
}
6441 
6442 /*
6443  * sctp_bindx(ADD) for one address.
6444  * assumes all arguments are valid/checked by caller.
6445  */
6446 void
6447 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6448     struct sockaddr *sa, sctp_assoc_t assoc_id,
6449     uint32_t vrf_id, int *error, void *p)
6450 {
6451 	struct sockaddr *addr_touse;
6452 
6453 #ifdef INET6
6454 	struct sockaddr_in sin;
6455 
6456 #endif
6457 
6458 	/* see if we're bound all already! */
6459 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6460 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6461 		*error = EINVAL;
6462 		return;
6463 	}
6464 	addr_touse = sa;
6465 #ifdef INET6
6466 	if (sa->sa_family == AF_INET6) {
6467 		struct sockaddr_in6 *sin6;
6468 
6469 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6470 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6471 			*error = EINVAL;
6472 			return;
6473 		}
6474 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6475 			/* can only bind v6 on PF_INET6 sockets */
6476 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6477 			*error = EINVAL;
6478 			return;
6479 		}
6480 		sin6 = (struct sockaddr_in6 *)addr_touse;
6481 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6482 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6483 			    SCTP_IPV6_V6ONLY(inp)) {
6484 				/* can't bind v4-mapped on PF_INET sockets */
6485 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6486 				*error = EINVAL;
6487 				return;
6488 			}
6489 			in6_sin6_2_sin(&sin, sin6);
6490 			addr_touse = (struct sockaddr *)&sin;
6491 		}
6492 	}
6493 #endif
6494 #ifdef INET
6495 	if (sa->sa_family == AF_INET) {
6496 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6497 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6498 			*error = EINVAL;
6499 			return;
6500 		}
6501 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6502 		    SCTP_IPV6_V6ONLY(inp)) {
6503 			/* can't bind v4 on PF_INET sockets */
6504 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6505 			*error = EINVAL;
6506 			return;
6507 		}
6508 	}
6509 #endif
6510 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6511 		if (p == NULL) {
6512 			/* Can't get proc for Net/Open BSD */
6513 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6514 			*error = EINVAL;
6515 			return;
6516 		}
6517 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6518 		return;
6519 	}
6520 	/*
6521 	 * No locks required here since bind and mgmt_ep_sa all do their own
6522 	 * locking. If we do something for the FIX: below we may need to
6523 	 * lock in that case.
6524 	 */
6525 	if (assoc_id == 0) {
6526 		/* add the address */
6527 		struct sctp_inpcb *lep;
6528 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6529 
6530 		/* validate the incoming port */
6531 		if ((lsin->sin_port != 0) &&
6532 		    (lsin->sin_port != inp->sctp_lport)) {
6533 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6534 			*error = EINVAL;
6535 			return;
6536 		} else {
6537 			/* user specified 0 port, set it to existing port */
6538 			lsin->sin_port = inp->sctp_lport;
6539 		}
6540 
6541 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6542 		if (lep != NULL) {
6543 			/*
6544 			 * We must decrement the refcount since we have the
6545 			 * ep already and are binding. No remove going on
6546 			 * here.
6547 			 */
6548 			SCTP_INP_DECR_REF(lep);
6549 		}
6550 		if (lep == inp) {
6551 			/* already bound to it.. ok */
6552 			return;
6553 		} else if (lep == NULL) {
6554 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6555 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6556 			    SCTP_ADD_IP_ADDRESS,
6557 			    vrf_id, NULL);
6558 		} else {
6559 			*error = EADDRINUSE;
6560 		}
6561 		if (*error)
6562 			return;
6563 	} else {
6564 		/*
6565 		 * FIX: decide whether we allow assoc based bindx
6566 		 */
6567 	}
6568 }
6569 
6570 /*
6571  * sctp_bindx(DELETE) for one address.
6572  * assumes all arguments are valid/checked by caller.
6573  */
6574 void
6575 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6576     struct sockaddr *sa, sctp_assoc_t assoc_id,
6577     uint32_t vrf_id, int *error)
6578 {
6579 	struct sockaddr *addr_touse;
6580 
6581 #ifdef INET6
6582 	struct sockaddr_in sin;
6583 
6584 #endif
6585 
6586 	/* see if we're bound all already! */
6587 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6588 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6589 		*error = EINVAL;
6590 		return;
6591 	}
6592 	addr_touse = sa;
6593 #ifdef INET6
6594 	if (sa->sa_family == AF_INET6) {
6595 		struct sockaddr_in6 *sin6;
6596 
6597 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6598 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6599 			*error = EINVAL;
6600 			return;
6601 		}
6602 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6603 			/* can only bind v6 on PF_INET6 sockets */
6604 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6605 			*error = EINVAL;
6606 			return;
6607 		}
6608 		sin6 = (struct sockaddr_in6 *)addr_touse;
6609 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6610 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6611 			    SCTP_IPV6_V6ONLY(inp)) {
6612 				/* can't bind mapped-v4 on PF_INET sockets */
6613 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6614 				*error = EINVAL;
6615 				return;
6616 			}
6617 			in6_sin6_2_sin(&sin, sin6);
6618 			addr_touse = (struct sockaddr *)&sin;
6619 		}
6620 	}
6621 #endif
6622 #ifdef INET
6623 	if (sa->sa_family == AF_INET) {
6624 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6625 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6626 			*error = EINVAL;
6627 			return;
6628 		}
6629 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6630 		    SCTP_IPV6_V6ONLY(inp)) {
6631 			/* can't bind v4 on PF_INET sockets */
6632 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6633 			*error = EINVAL;
6634 			return;
6635 		}
6636 	}
6637 #endif
6638 	/*
6639 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6640 	 * below is ever changed we may need to lock before calling
6641 	 * association level binding.
6642 	 */
6643 	if (assoc_id == 0) {
6644 		/* delete the address */
6645 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6646 		    SCTP_DEL_IP_ADDRESS,
6647 		    vrf_id, NULL);
6648 	} else {
6649 		/*
6650 		 * FIX: decide whether we allow assoc based bindx
6651 		 */
6652 	}
6653 }
6654 
6655 /*
6656  * returns the valid local address count for an assoc, taking into account
6657  * all scoping rules
6658  */
6659 int
6660 sctp_local_addr_count(struct sctp_tcb *stcb)
6661 {
6662 	int loopback_scope;
6663 
6664 #if defined(INET)
6665 	int ipv4_local_scope, ipv4_addr_legal;
6666 
6667 #endif
6668 #if defined (INET6)
6669 	int local_scope, site_scope, ipv6_addr_legal;
6670 
6671 #endif
6672 	struct sctp_vrf *vrf;
6673 	struct sctp_ifn *sctp_ifn;
6674 	struct sctp_ifa *sctp_ifa;
6675 	int count = 0;
6676 
6677 	/* Turn on all the appropriate scopes */
6678 	loopback_scope = stcb->asoc.scope.loopback_scope;
6679 #if defined(INET)
6680 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6681 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6682 #endif
6683 #if defined(INET6)
6684 	local_scope = stcb->asoc.scope.local_scope;
6685 	site_scope = stcb->asoc.scope.site_scope;
6686 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6687 #endif
6688 	SCTP_IPI_ADDR_RLOCK();
6689 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6690 	if (vrf == NULL) {
6691 		/* no vrf, no addresses */
6692 		SCTP_IPI_ADDR_RUNLOCK();
6693 		return (0);
6694 	}
6695 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6696 		/*
6697 		 * bound all case: go through all ifns on the vrf
6698 		 */
6699 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6700 			if ((loopback_scope == 0) &&
6701 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6702 				continue;
6703 			}
6704 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6705 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6706 					continue;
6707 				switch (sctp_ifa->address.sa.sa_family) {
6708 #ifdef INET
6709 				case AF_INET:
6710 					if (ipv4_addr_legal) {
6711 						struct sockaddr_in *sin;
6712 
6713 						sin = &sctp_ifa->address.sin;
6714 						if (sin->sin_addr.s_addr == 0) {
6715 							/*
6716 							 * skip unspecified
6717 							 * addrs
6718 							 */
6719 							continue;
6720 						}
6721 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6722 						    &sin->sin_addr) != 0) {
6723 							continue;
6724 						}
6725 						if ((ipv4_local_scope == 0) &&
6726 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6727 							continue;
6728 						}
6729 						/* count this one */
6730 						count++;
6731 					} else {
6732 						continue;
6733 					}
6734 					break;
6735 #endif
6736 #ifdef INET6
6737 				case AF_INET6:
6738 					if (ipv6_addr_legal) {
6739 						struct sockaddr_in6 *sin6;
6740 
6741 						sin6 = &sctp_ifa->address.sin6;
6742 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6743 							continue;
6744 						}
6745 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6746 						    &sin6->sin6_addr) != 0) {
6747 							continue;
6748 						}
6749 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6750 							if (local_scope == 0)
6751 								continue;
6752 							if (sin6->sin6_scope_id == 0) {
6753 								if (sa6_recoverscope(sin6) != 0)
6754 									/*
6755 									 *
6756 									 * bad
6757 									 *
6758 									 * li
6759 									 * nk
6760 									 *
6761 									 * loc
6762 									 * al
6763 									 *
6764 									 * add
6765 									 * re
6766 									 * ss
6767 									 * */
6768 									continue;
6769 							}
6770 						}
6771 						if ((site_scope == 0) &&
6772 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6773 							continue;
6774 						}
6775 						/* count this one */
6776 						count++;
6777 					}
6778 					break;
6779 #endif
6780 				default:
6781 					/* TSNH */
6782 					break;
6783 				}
6784 			}
6785 		}
6786 	} else {
6787 		/*
6788 		 * subset bound case
6789 		 */
6790 		struct sctp_laddr *laddr;
6791 
6792 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6793 		    sctp_nxt_addr) {
6794 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6795 				continue;
6796 			}
6797 			/* count this one */
6798 			count++;
6799 		}
6800 	}
6801 	SCTP_IPI_ADDR_RUNLOCK();
6802 	return (count);
6803 }
6804 
6805 #if defined(SCTP_LOCAL_TRACE_BUF)
6806 
/*
 * Append one entry to the global circular trace buffer.  Lock-free:
 * a CAS loop claims the next slot index, so concurrent writers never
 * block (a slot may later be overwritten once the index wraps).
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* Atomically advance the shared index; retry on CAS failure. */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* an out-of-range claim wraps to slot 0 */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	/* fill the claimed slot */
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6832 
6833 #endif
/*
 * Input handler for SCTP-over-UDP tunneled packets.  Called with
 * 'off' giving the offset of the UDP header in 'm'.  Records the UDP
 * source port, strips the UDP header from the chain, adjusts the IP
 * payload length and re-injects the packet into the normal SCTP
 * input path.  Consumes 'm' on all paths.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
{
	struct ip *iph;

#ifdef INET6
	struct ip6_hdr *ip6;

#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port (no byte-order conversion; passed through) */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/*
		 * Gak pullup failed.  NOTE(review): m_pullup() presumably
		 * frees the chain on failure, so only 'm' needs to be
		 * released at 'out' - confirm.
		 */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	iph = mtod(m, struct ip *);
	/* fix the payload length and dispatch by IP version */
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6905 
/*
 * Tear down SCTP-over-UDP tunneling: close the per-address-family UDP
 * tunnel sockets created by sctp_over_udp_start() and clear the global
 * pointers so a subsequent start sees a clean state.  Safe to call when
 * tunneling was never started (both pointers NULL).
 */
void
sctp_over_udp_stop(void)
{
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
6926 
6927 int
6928 sctp_over_udp_start(void)
6929 {
6930 	uint16_t port;
6931 	int ret;
6932 
6933 #ifdef INET
6934 	struct sockaddr_in sin;
6935 
6936 #endif
6937 #ifdef INET6
6938 	struct sockaddr_in6 sin6;
6939 
6940 #endif
6941 	/*
6942 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6943 	 * for writting!
6944 	 */
6945 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
6946 	if (ntohs(port) == 0) {
6947 		/* Must have a port set */
6948 		return (EINVAL);
6949 	}
6950 #ifdef INET
6951 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
6952 		/* Already running -- must stop first */
6953 		return (EALREADY);
6954 	}
6955 #endif
6956 #ifdef INET6
6957 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
6958 		/* Already running -- must stop first */
6959 		return (EALREADY);
6960 	}
6961 #endif
6962 #ifdef INET
6963 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
6964 	    SOCK_DGRAM, IPPROTO_UDP,
6965 	    curthread->td_ucred, curthread))) {
6966 		sctp_over_udp_stop();
6967 		return (ret);
6968 	}
6969 	/* Call the special UDP hook. */
6970 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
6971 	    sctp_recv_udp_tunneled_packet))) {
6972 		sctp_over_udp_stop();
6973 		return (ret);
6974 	}
6975 	/* Ok, we have a socket, bind it to the port. */
6976 	memset(&sin, 0, sizeof(struct sockaddr_in));
6977 	sin.sin_len = sizeof(struct sockaddr_in);
6978 	sin.sin_family = AF_INET;
6979 	sin.sin_port = htons(port);
6980 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
6981 	    (struct sockaddr *)&sin, curthread))) {
6982 		sctp_over_udp_stop();
6983 		return (ret);
6984 	}
6985 #endif
6986 #ifdef INET6
6987 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
6988 	    SOCK_DGRAM, IPPROTO_UDP,
6989 	    curthread->td_ucred, curthread))) {
6990 		sctp_over_udp_stop();
6991 		return (ret);
6992 	}
6993 	/* Call the special UDP hook. */
6994 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
6995 	    sctp_recv_udp_tunneled_packet))) {
6996 		sctp_over_udp_stop();
6997 		return (ret);
6998 	}
6999 	/* Ok, we have a socket, bind it to the port. */
7000 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7001 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7002 	sin6.sin6_family = AF_INET6;
7003 	sin6.sin6_port = htons(port);
7004 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7005 	    (struct sockaddr *)&sin6, curthread))) {
7006 		sctp_over_udp_stop();
7007 		return (ret);
7008 	}
7009 #endif
7010 	return (0);
7011 }
7012