xref: /freebsd/sys/netinet/sctputil.c (revision 4990d495fcc77c51b3f46c91ba3a064b565afae0)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>	/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55 
56 
57 #ifndef KTR_SCTP
58 #define KTR_SCTP KTR_SUBSYS
59 #endif
60 
61 extern const struct sctp_cc_functions sctp_cc_functions[];
62 extern const struct sctp_ss_functions sctp_ss_functions[];
63 
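/*
 * The sctp_sblog()/sctp_log_*() helpers below pack their arguments into a
 * struct sctp_cwnd_log and emit the four 32-bit words of its misc view via
 * SCTP_CTR6() under the KTR_SCTP mask, so the records can later be pulled
 * out with the kernel tracing tools (e.g. ktrdump(8)).
 */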
64 void
65 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
66 {
67 	struct sctp_cwnd_log sctp_clog;
68 
69 	sctp_clog.x.sb.stcb = stcb;
70 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
71 	if (stcb)
72 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
73 	else
74 		sctp_clog.x.sb.stcb_sbcc = 0;
75 	sctp_clog.x.sb.incr = incr;
76 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
77 	    SCTP_LOG_EVENT_SB,
78 	    from,
79 	    sctp_clog.x.misc.log1,
80 	    sctp_clog.x.misc.log2,
81 	    sctp_clog.x.misc.log3,
82 	    sctp_clog.x.misc.log4);
83 }
84 
85 void
86 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
87 {
88 	struct sctp_cwnd_log sctp_clog;
89 
90 	sctp_clog.x.close.inp = (void *)inp;
91 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
92 	if (stcb) {
93 		sctp_clog.x.close.stcb = (void *)stcb;
94 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
95 	} else {
96 		sctp_clog.x.close.stcb = 0;
97 		sctp_clog.x.close.state = 0;
98 	}
99 	sctp_clog.x.close.loc = loc;
100 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
101 	    SCTP_LOG_EVENT_CLOSE,
102 	    0,
103 	    sctp_clog.x.misc.log1,
104 	    sctp_clog.x.misc.log2,
105 	    sctp_clog.x.misc.log3,
106 	    sctp_clog.x.misc.log4);
107 }
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 }
125 
126 void
127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128 {
129 	struct sctp_cwnd_log sctp_clog;
130 
131 	sctp_clog.x.strlog.stcb = stcb;
132 	sctp_clog.x.strlog.n_tsn = tsn;
133 	sctp_clog.x.strlog.n_sseq = sseq;
134 	sctp_clog.x.strlog.e_tsn = 0;
135 	sctp_clog.x.strlog.e_sseq = 0;
136 	sctp_clog.x.strlog.strm = stream;
137 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138 	    SCTP_LOG_EVENT_STRM,
139 	    from,
140 	    sctp_clog.x.misc.log1,
141 	    sctp_clog.x.misc.log2,
142 	    sctp_clog.x.misc.log3,
143 	    sctp_clog.x.misc.log4);
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 void
166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167 {
168 	struct sctp_cwnd_log sctp_clog;
169 
170 	sctp_clog.x.sack.cumack = cumack;
171 	sctp_clog.x.sack.oldcumack = old_cumack;
172 	sctp_clog.x.sack.tsn = tsn;
173 	sctp_clog.x.sack.numGaps = gaps;
174 	sctp_clog.x.sack.numDups = dups;
175 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176 	    SCTP_LOG_EVENT_SACK,
177 	    from,
178 	    sctp_clog.x.misc.log1,
179 	    sctp_clog.x.misc.log2,
180 	    sctp_clog.x.misc.log3,
181 	    sctp_clog.x.misc.log4);
182 }
183 
184 void
185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186 {
187 	struct sctp_cwnd_log sctp_clog;
188 
189 	memset(&sctp_clog, 0, sizeof(sctp_clog));
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
204 {
205 	struct sctp_cwnd_log sctp_clog;
206 
207 	memset(&sctp_clog, 0, sizeof(sctp_clog));
208 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210 	sctp_clog.x.fr.tsn = tsn;
211 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212 	    SCTP_LOG_EVENT_FR,
213 	    from,
214 	    sctp_clog.x.misc.log1,
215 	    sctp_clog.x.misc.log2,
216 	    sctp_clog.x.misc.log3,
217 	    sctp_clog.x.misc.log4);
218 }
219 
220 #ifdef SCTP_MBUF_LOGGING
221 void
222 sctp_log_mb(struct mbuf *m, int from)
223 {
224 	struct sctp_cwnd_log sctp_clog;
225 
226 	sctp_clog.x.mb.mp = m;
227 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
228 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
229 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
230 	if (SCTP_BUF_IS_EXTENDED(m)) {
231 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
232 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
233 	} else {
234 		sctp_clog.x.mb.ext = 0;
235 		sctp_clog.x.mb.refcnt = 0;
236 	}
237 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
238 	    SCTP_LOG_EVENT_MBUF,
239 	    from,
240 	    sctp_clog.x.misc.log1,
241 	    sctp_clog.x.misc.log2,
242 	    sctp_clog.x.misc.log3,
243 	    sctp_clog.x.misc.log4);
244 }
245 
246 void
247 sctp_log_mbc(struct mbuf *m, int from)
248 {
249 	struct mbuf *mat;
250 
251 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
252 		sctp_log_mb(mat, from);
253 	}
254 }
255 
256 #endif
257 
258 void
259 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
260 {
261 	struct sctp_cwnd_log sctp_clog;
262 
263 	if (control == NULL) {
264 		SCTP_PRINTF("Gak log of NULL?\n");
265 		return;
266 	}
267 	sctp_clog.x.strlog.stcb = control->stcb;
268 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
269 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
270 	sctp_clog.x.strlog.strm = control->sinfo_stream;
271 	if (poschk != NULL) {
272 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
273 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
274 	} else {
275 		sctp_clog.x.strlog.e_tsn = 0;
276 		sctp_clog.x.strlog.e_sseq = 0;
277 	}
278 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
279 	    SCTP_LOG_EVENT_STRM,
280 	    from,
281 	    sctp_clog.x.misc.log1,
282 	    sctp_clog.x.misc.log2,
283 	    sctp_clog.x.misc.log3,
284 	    sctp_clog.x.misc.log4);
285 }
286 
287 void
288 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
289 {
290 	struct sctp_cwnd_log sctp_clog;
291 
292 	sctp_clog.x.cwnd.net = net;
293 	if (stcb->asoc.send_queue_cnt > 255)
294 		sctp_clog.x.cwnd.cnt_in_send = 255;
295 	else
296 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
297 	if (stcb->asoc.stream_queue_cnt > 255)
298 		sctp_clog.x.cwnd.cnt_in_str = 255;
299 	else
300 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
301 
302 	if (net) {
303 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
304 		sctp_clog.x.cwnd.inflight = net->flight_size;
305 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
306 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
307 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
308 	}
309 	if (SCTP_CWNDLOG_PRESEND == from) {
310 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
311 	}
312 	sctp_clog.x.cwnd.cwnd_augment = augment;
313 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
314 	    SCTP_LOG_EVENT_CWND,
315 	    from,
316 	    sctp_clog.x.misc.log1,
317 	    sctp_clog.x.misc.log2,
318 	    sctp_clog.x.misc.log3,
319 	    sctp_clog.x.misc.log4);
320 }
321 
322 void
323 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
324 {
325 	struct sctp_cwnd_log sctp_clog;
326 
327 	memset(&sctp_clog, 0, sizeof(sctp_clog));
328 	if (inp) {
329 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
330 
331 	} else {
332 		sctp_clog.x.lock.sock = (void *)NULL;
333 	}
334 	sctp_clog.x.lock.inp = (void *)inp;
335 	if (stcb) {
336 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
337 	} else {
338 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
339 	}
340 	if (inp) {
341 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
342 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
343 	} else {
344 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
345 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
346 	}
347 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
348 	if (inp && (inp->sctp_socket)) {
349 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
350 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
351 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
352 	} else {
353 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
354 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
355 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
356 	}
357 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
358 	    SCTP_LOG_LOCK_EVENT,
359 	    from,
360 	    sctp_clog.x.misc.log1,
361 	    sctp_clog.x.misc.log2,
362 	    sctp_clog.x.misc.log3,
363 	    sctp_clog.x.misc.log4);
364 }
365 
366 void
367 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
368 {
369 	struct sctp_cwnd_log sctp_clog;
370 
371 	memset(&sctp_clog, 0, sizeof(sctp_clog));
372 	sctp_clog.x.cwnd.net = net;
373 	sctp_clog.x.cwnd.cwnd_new_value = error;
374 	sctp_clog.x.cwnd.inflight = net->flight_size;
375 	sctp_clog.x.cwnd.cwnd_augment = burst;
376 	if (stcb->asoc.send_queue_cnt > 255)
377 		sctp_clog.x.cwnd.cnt_in_send = 255;
378 	else
379 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
380 	if (stcb->asoc.stream_queue_cnt > 255)
381 		sctp_clog.x.cwnd.cnt_in_str = 255;
382 	else
383 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
384 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
385 	    SCTP_LOG_EVENT_MAXBURST,
386 	    from,
387 	    sctp_clog.x.misc.log1,
388 	    sctp_clog.x.misc.log2,
389 	    sctp_clog.x.misc.log3,
390 	    sctp_clog.x.misc.log4);
391 }
392 
393 void
394 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
395 {
396 	struct sctp_cwnd_log sctp_clog;
397 
398 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
399 	sctp_clog.x.rwnd.send_size = snd_size;
400 	sctp_clog.x.rwnd.overhead = overhead;
401 	sctp_clog.x.rwnd.new_rwnd = 0;
402 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
403 	    SCTP_LOG_EVENT_RWND,
404 	    from,
405 	    sctp_clog.x.misc.log1,
406 	    sctp_clog.x.misc.log2,
407 	    sctp_clog.x.misc.log3,
408 	    sctp_clog.x.misc.log4);
409 }
410 
411 void
412 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
413 {
414 	struct sctp_cwnd_log sctp_clog;
415 
416 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
417 	sctp_clog.x.rwnd.send_size = flight_size;
418 	sctp_clog.x.rwnd.overhead = overhead;
419 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
420 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
421 	    SCTP_LOG_EVENT_RWND,
422 	    from,
423 	    sctp_clog.x.misc.log1,
424 	    sctp_clog.x.misc.log2,
425 	    sctp_clog.x.misc.log3,
426 	    sctp_clog.x.misc.log4);
427 }
428 
429 #ifdef SCTP_MBCNT_LOGGING
430 static void
431 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
432 {
433 	struct sctp_cwnd_log sctp_clog;
434 
435 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
436 	sctp_clog.x.mbcnt.size_change = book;
437 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
438 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
439 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
440 	    SCTP_LOG_EVENT_MBCNT,
441 	    from,
442 	    sctp_clog.x.misc.log1,
443 	    sctp_clog.x.misc.log2,
444 	    sctp_clog.x.misc.log3,
445 	    sctp_clog.x.misc.log4);
446 }
447 
448 #endif
449 
450 void
451 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
452 {
453 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
454 	    SCTP_LOG_MISC_EVENT,
455 	    from,
456 	    a, b, c, d);
457 }
458 
459 void
460 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
461 {
462 	struct sctp_cwnd_log sctp_clog;
463 
464 	sctp_clog.x.wake.stcb = (void *)stcb;
465 	sctp_clog.x.wake.wake_cnt = wake_cnt;
466 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
467 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
468 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
469 
470 	if (stcb->asoc.stream_queue_cnt < 0xff)
471 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
472 	else
473 		sctp_clog.x.wake.stream_qcnt = 0xff;
474 
475 	if (stcb->asoc.chunks_on_out_queue < 0xff)
476 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
477 	else
478 		sctp_clog.x.wake.chunks_on_oque = 0xff;
479 
480 	sctp_clog.x.wake.sctpflags = 0;
481 	/* set in the deferred mode stuff */
482 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
483 		sctp_clog.x.wake.sctpflags |= 1;
484 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
485 		sctp_clog.x.wake.sctpflags |= 2;
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
487 		sctp_clog.x.wake.sctpflags |= 4;
488 	/* what about the sb */
489 	if (stcb->sctp_socket) {
490 		struct socket *so = stcb->sctp_socket;
491 
492 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
493 	} else {
494 		sctp_clog.x.wake.sbflags = 0xff;
495 	}
496 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
497 	    SCTP_LOG_EVENT_WAKE,
498 	    from,
499 	    sctp_clog.x.misc.log1,
500 	    sctp_clog.x.misc.log2,
501 	    sctp_clog.x.misc.log3,
502 	    sctp_clog.x.misc.log4);
503 }
504 
505 void
506 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
507 {
508 	struct sctp_cwnd_log sctp_clog;
509 
510 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
511 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
512 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
513 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
514 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
515 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
516 	sctp_clog.x.blk.sndlen = (uint32_t) sendlen;
517 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
518 	    SCTP_LOG_EVENT_BLOCK,
519 	    from,
520 	    sctp_clog.x.misc.log1,
521 	    sctp_clog.x.misc.log2,
522 	    sctp_clog.x.misc.log3,
523 	    sctp_clog.x.misc.log4);
524 }
525 
526 int
527 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
528 {
529 	/* May need to fix this if ktrdump does not work */
530 	return (0);
531 }
532 
533 #ifdef SCTP_AUDITING_ENABLED
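/*
 * sctp_audit_data is a small circular log of two-byte (event, detail)
 * records; sctp_audit_indx always points at the next slot to fill and
 * wraps at SCTP_AUDIT_SIZE.
 */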
534 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
535 static int sctp_audit_indx = 0;
536 
537 static
538 void
539 sctp_print_audit_report(void)
540 {
541 	int i;
542 	int cnt;
543 
544 	cnt = 0;
545 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
546 		if ((sctp_audit_data[i][0] == 0xe0) &&
547 		    (sctp_audit_data[i][1] == 0x01)) {
548 			cnt = 0;
549 			SCTP_PRINTF("\n");
550 		} else if (sctp_audit_data[i][0] == 0xf0) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
554 		    (sctp_audit_data[i][1] == 0x01)) {
555 			SCTP_PRINTF("\n");
556 			cnt = 0;
557 		}
558 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
559 		    (uint32_t) sctp_audit_data[i][1]);
560 		cnt++;
561 		if ((cnt % 14) == 0)
562 			SCTP_PRINTF("\n");
563 	}
564 	for (i = 0; i < sctp_audit_indx; i++) {
565 		if ((sctp_audit_data[i][0] == 0xe0) &&
566 		    (sctp_audit_data[i][1] == 0x01)) {
567 			cnt = 0;
568 			SCTP_PRINTF("\n");
569 		} else if (sctp_audit_data[i][0] == 0xf0) {
570 			cnt = 0;
571 			SCTP_PRINTF("\n");
572 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
573 		    (sctp_audit_data[i][1] == 0x01)) {
574 			SCTP_PRINTF("\n");
575 			cnt = 0;
576 		}
577 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
578 		    (uint32_t) sctp_audit_data[i][1]);
579 		cnt++;
580 		if ((cnt % 14) == 0)
581 			SCTP_PRINTF("\n");
582 	}
583 	SCTP_PRINTF("\n");
584 }
585 
586 void
587 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
588     struct sctp_nets *net)
589 {
590 	int resend_cnt, tot_out, rep, tot_book_cnt;
591 	struct sctp_nets *lnet;
592 	struct sctp_tmit_chunk *chk;
593 
594 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
595 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
596 	sctp_audit_indx++;
597 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
598 		sctp_audit_indx = 0;
599 	}
600 	if (inp == NULL) {
601 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
602 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
603 		sctp_audit_indx++;
604 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
605 			sctp_audit_indx = 0;
606 		}
607 		return;
608 	}
609 	if (stcb == NULL) {
610 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
611 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
612 		sctp_audit_indx++;
613 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
614 			sctp_audit_indx = 0;
615 		}
616 		return;
617 	}
618 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
619 	sctp_audit_data[sctp_audit_indx][1] =
620 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
621 	sctp_audit_indx++;
622 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
623 		sctp_audit_indx = 0;
624 	}
625 	rep = 0;
626 	tot_book_cnt = 0;
627 	resend_cnt = tot_out = 0;
628 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
629 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
630 			resend_cnt++;
631 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
632 			tot_out += chk->book_size;
633 			tot_book_cnt++;
634 		}
635 	}
636 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
637 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
638 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
639 		sctp_audit_indx++;
640 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
641 			sctp_audit_indx = 0;
642 		}
643 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
644 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
645 		rep = 1;
646 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
647 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
648 		sctp_audit_data[sctp_audit_indx][1] =
649 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
650 		sctp_audit_indx++;
651 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
652 			sctp_audit_indx = 0;
653 		}
654 	}
655 	if (tot_out != stcb->asoc.total_flight) {
656 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
657 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
658 		sctp_audit_indx++;
659 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
660 			sctp_audit_indx = 0;
661 		}
662 		rep = 1;
663 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
664 		    (int)stcb->asoc.total_flight);
665 		stcb->asoc.total_flight = tot_out;
666 	}
667 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
668 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
669 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
670 		sctp_audit_indx++;
671 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
672 			sctp_audit_indx = 0;
673 		}
674 		rep = 1;
675 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
676 
677 		stcb->asoc.total_flight_count = tot_book_cnt;
678 	}
679 	tot_out = 0;
680 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
681 		tot_out += lnet->flight_size;
682 	}
683 	if (tot_out != stcb->asoc.total_flight) {
684 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
685 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
686 		sctp_audit_indx++;
687 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
688 			sctp_audit_indx = 0;
689 		}
690 		rep = 1;
691 		SCTP_PRINTF("real flight:%d net total was %d\n",
692 		    stcb->asoc.total_flight, tot_out);
693 		/* now corrective action */
694 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
695 
696 			tot_out = 0;
697 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
698 				if ((chk->whoTo == lnet) &&
699 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
700 					tot_out += chk->book_size;
701 				}
702 			}
703 			if (lnet->flight_size != tot_out) {
704 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
705 				    (void *)lnet, lnet->flight_size,
706 				    tot_out);
707 				lnet->flight_size = tot_out;
708 			}
709 		}
710 	}
711 	if (rep) {
712 		sctp_print_audit_report();
713 	}
714 }
715 
716 void
717 sctp_audit_log(uint8_t ev, uint8_t fd)
718 {
719 
720 	sctp_audit_data[sctp_audit_indx][0] = ev;
721 	sctp_audit_data[sctp_audit_indx][1] = fd;
722 	sctp_audit_indx++;
723 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
724 		sctp_audit_indx = 0;
725 	}
726 }
727 
728 #endif
729 
730 /*
731  * sctp_stop_timers_for_shutdown() should be called
732  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
733  * state to make sure that all timers are stopped.
734  */
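/*
 * In this revision only the delayed-SACK, stream-reset, ASCONF, autoclose
 * and delayed-event timers plus the per-net PMTU and heartbeat timers are
 * stopped below; the retransmission and shutdown timers are not touched
 * here.
 */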
735 void
736 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
737 {
738 	struct sctp_association *asoc;
739 	struct sctp_nets *net;
740 
741 	asoc = &stcb->asoc;
742 
743 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
744 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
745 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
746 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
747 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
748 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
749 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
750 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
751 	}
752 }
753 
754 /*
755  * A list of sizes based on typical MTUs, used only if the next hop's MTU
756  * is not returned.
757  */
758 static uint32_t sctp_mtu_sizes[] = {
759 	68,
760 	296,
761 	508,
762 	512,
763 	544,
764 	576,
765 	1006,
766 	1492,
767 	1500,
768 	1536,
769 	2002,
770 	2048,
771 	4352,
772 	4464,
773 	8166,
774 	17914,
775 	32000,
776 	65535
777 };
778 
779 /*
780  * Return the largest MTU smaller than val. If there is no
781  * entry, just return val.
782  */
783 uint32_t
784 sctp_get_prev_mtu(uint32_t val)
785 {
786 	uint32_t i;
787 
788 	if (val <= sctp_mtu_sizes[0]) {
789 		return (val);
790 	}
791 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
792 		if (val <= sctp_mtu_sizes[i]) {
793 			break;
794 		}
795 	}
796 	return (sctp_mtu_sizes[i - 1]);
797 }
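/*
 * Illustrative behaviour: with the table above, sctp_get_prev_mtu(1400)
 * returns 1006 and sctp_get_prev_mtu(1500) returns 1492; any value at or
 * below 68 is returned unchanged.
 */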
798 
799 /*
800  * Return the smallest MTU larger than val. If there is no
801  * entry, just return val.
802  */
803 uint32_t
804 sctp_get_next_mtu(uint32_t val)
805 {
806 	/* select another MTU that is just bigger than this one */
807 	uint32_t i;
808 
809 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
810 		if (val < sctp_mtu_sizes[i]) {
811 			return (sctp_mtu_sizes[i]);
812 		}
813 	}
814 	return (val);
815 }
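/*
 * Illustrative behaviour: sctp_get_next_mtu(1400) returns 1492 and
 * sctp_get_next_mtu(1500) returns 1536; a value larger than every table
 * entry (e.g. 70000) is returned unchanged.
 */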
816 
817 void
818 sctp_fill_random_store(struct sctp_pcb *m)
819 {
820 	/*
821 	 * Here we use MD5/SHA-1 to hash our good random numbers together with
822 	 * our counter. The result becomes our new pool of good random numbers
823 	 * and we then set up to hand these out. Note that we do no locking to
824 	 * protect this. This is OK, since if competing callers get in here we
825 	 * will just get more gobbledygook in the random store, which is what
826 	 * we want. There is a danger that two callers will use the same random
827 	 * numbers, but that's OK too since that is random as well :->
828 	 */
829 	m->store_at = 0;
830 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
831 	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
832 	    sizeof(m->random_counter), (uint8_t *) m->random_store);
833 	m->random_counter++;
834 }
835 
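/*
 * sctp_select_initial_TSN() hands out the random store four bytes at a
 * time, using an atomic compare-and-set on store_at so concurrent callers
 * advance the cursor safely; when the cursor wraps, the store is refilled
 * via sctp_fill_random_store().
 */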
836 uint32_t
837 sctp_select_initial_TSN(struct sctp_pcb *inp)
838 {
839 	/*
840 	 * A true implementation should use a random selection process to get
841 	 * the initial stream sequence number, using RFC 1750 as a good
842 	 * guideline.
843 	 */
844 	uint32_t x, *xp;
845 	uint8_t *p;
846 	int store_at, new_store;
847 
848 	if (inp->initial_sequence_debug != 0) {
849 		uint32_t ret;
850 
851 		ret = inp->initial_sequence_debug;
852 		inp->initial_sequence_debug++;
853 		return (ret);
854 	}
855 retry:
856 	store_at = inp->store_at;
857 	new_store = store_at + sizeof(uint32_t);
858 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
859 		new_store = 0;
860 	}
861 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
862 		goto retry;
863 	}
864 	if (new_store == 0) {
865 		/* Refill the random store */
866 		sctp_fill_random_store(inp);
867 	}
868 	p = &inp->random_store[store_at];
869 	xp = (uint32_t *) p;
870 	x = *xp;
871 	return (x);
872 }
873 
874 uint32_t
875 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
876 {
877 	uint32_t x;
878 	struct timeval now;
879 
880 	if (check) {
881 		(void)SCTP_GETTIME_TIMEVAL(&now);
882 	}
883 	for (;;) {
884 		x = sctp_select_initial_TSN(&inp->sctp_ep);
885 		if (x == 0) {
886 			/* we never use 0 */
887 			continue;
888 		}
889 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
890 			break;
891 		}
892 	}
893 	return (x);
894 }
895 
896 int32_t
897 sctp_map_assoc_state(int kernel_state)
898 {
899 	int32_t user_state;
900 
901 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
902 		user_state = SCTP_CLOSED;
903 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
904 		user_state = SCTP_SHUTDOWN_PENDING;
905 	} else {
906 		switch (kernel_state & SCTP_STATE_MASK) {
907 		case SCTP_STATE_EMPTY:
908 			user_state = SCTP_CLOSED;
909 			break;
910 		case SCTP_STATE_INUSE:
911 			user_state = SCTP_CLOSED;
912 			break;
913 		case SCTP_STATE_COOKIE_WAIT:
914 			user_state = SCTP_COOKIE_WAIT;
915 			break;
916 		case SCTP_STATE_COOKIE_ECHOED:
917 			user_state = SCTP_COOKIE_ECHOED;
918 			break;
919 		case SCTP_STATE_OPEN:
920 			user_state = SCTP_ESTABLISHED;
921 			break;
922 		case SCTP_STATE_SHUTDOWN_SENT:
923 			user_state = SCTP_SHUTDOWN_SENT;
924 			break;
925 		case SCTP_STATE_SHUTDOWN_RECEIVED:
926 			user_state = SCTP_SHUTDOWN_RECEIVED;
927 			break;
928 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
929 			user_state = SCTP_SHUTDOWN_ACK_SENT;
930 			break;
931 		default:
932 			user_state = SCTP_CLOSED;
933 			break;
934 		}
935 	}
936 	return (user_state);
937 }
938 
939 int
940 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
941     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
942 {
943 	struct sctp_association *asoc;
944 
945 	/*
946 	 * Anything set to zero is taken care of by the allocation routine's
947 	 * bzero
948 	 */
949 
950 	/*
951 	 * Up front, select what scoping to apply to the addresses I tell my
952 	 * peer. Not sure what to do with these right now; we will need to
953 	 * come up with a way to set them. We may need to pass them through
954 	 * from the caller in the sctp_aloc_assoc() function.
955 	 */
956 	int i;
957 
958 #if defined(SCTP_DETAILED_STR_STATS)
959 	int j;
960 
961 #endif
962 
963 	asoc = &stcb->asoc;
964 	/* init all variables to a known value. */
965 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
966 	asoc->max_burst = inp->sctp_ep.max_burst;
967 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
968 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
969 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
970 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
971 	asoc->ecn_supported = inp->ecn_supported;
972 	asoc->prsctp_supported = inp->prsctp_supported;
973 	asoc->idata_supported = inp->idata_supported;
974 	asoc->auth_supported = inp->auth_supported;
975 	asoc->asconf_supported = inp->asconf_supported;
976 	asoc->reconfig_supported = inp->reconfig_supported;
977 	asoc->nrsack_supported = inp->nrsack_supported;
978 	asoc->pktdrop_supported = inp->pktdrop_supported;
979 	asoc->idata_supported = inp->idata_supported;
980 	asoc->sctp_cmt_pf = (uint8_t) 0;
981 	asoc->sctp_frag_point = inp->sctp_frag_point;
982 	asoc->sctp_features = inp->sctp_features;
983 	asoc->default_dscp = inp->sctp_ep.default_dscp;
984 	asoc->max_cwnd = inp->max_cwnd;
985 #ifdef INET6
986 	if (inp->sctp_ep.default_flowlabel) {
987 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
988 	} else {
989 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
990 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
991 			asoc->default_flowlabel &= 0x000fffff;
992 			asoc->default_flowlabel |= 0x80000000;
993 		} else {
994 			asoc->default_flowlabel = 0;
995 		}
996 	}
997 #endif
998 	asoc->sb_send_resv = 0;
999 	if (override_tag) {
1000 		asoc->my_vtag = override_tag;
1001 	} else {
1002 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1003 	}
1004 	/* Get the nonce tags */
1005 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1006 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1007 	asoc->vrf_id = vrf_id;
1008 
1009 #ifdef SCTP_ASOCLOG_OF_TSNS
1010 	asoc->tsn_in_at = 0;
1011 	asoc->tsn_out_at = 0;
1012 	asoc->tsn_in_wrapped = 0;
1013 	asoc->tsn_out_wrapped = 0;
1014 	asoc->cumack_log_at = 0;
1015 	asoc->cumack_log_atsnt = 0;
1016 #endif
1017 #ifdef SCTP_FS_SPEC_LOG
1018 	asoc->fs_index = 0;
1019 #endif
1020 	asoc->refcnt = 0;
1021 	asoc->assoc_up_sent = 0;
1022 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1023 	    sctp_select_initial_TSN(&inp->sctp_ep);
1024 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1025 	/* we are optimistic here */
1026 	asoc->peer_supports_nat = 0;
1027 	asoc->sent_queue_retran_cnt = 0;
1028 
1029 	/* for CMT */
1030 	asoc->last_net_cmt_send_started = NULL;
1031 
1032 	/* This will need to be adjusted */
1033 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1034 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1035 	asoc->asconf_seq_in = asoc->last_acked_seq;
1036 
1037 	/* here we are different, we hold the next one we expect */
1038 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1039 
1040 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1041 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1042 
1043 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1044 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1045 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1046 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1047 	asoc->free_chunk_cnt = 0;
1048 
1049 	asoc->iam_blocking = 0;
1050 	asoc->context = inp->sctp_context;
1051 	asoc->local_strreset_support = inp->local_strreset_support;
1052 	asoc->def_send = inp->def_send;
1053 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1054 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1055 	asoc->pr_sctp_cnt = 0;
1056 	asoc->total_output_queue_size = 0;
1057 
1058 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1059 		asoc->scope.ipv6_addr_legal = 1;
1060 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1061 			asoc->scope.ipv4_addr_legal = 1;
1062 		} else {
1063 			asoc->scope.ipv4_addr_legal = 0;
1064 		}
1065 	} else {
1066 		asoc->scope.ipv6_addr_legal = 0;
1067 		asoc->scope.ipv4_addr_legal = 1;
1068 	}
1069 
1070 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1071 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1072 
1073 	asoc->smallest_mtu = inp->sctp_frag_point;
1074 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1075 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1076 
1077 	asoc->locked_on_sending = NULL;
1078 	asoc->stream_locked_on = 0;
1079 	asoc->ecn_echo_cnt_onq = 0;
1080 	asoc->stream_locked = 0;
1081 
1082 	asoc->send_sack = 1;
1083 
1084 	LIST_INIT(&asoc->sctp_restricted_addrs);
1085 
1086 	TAILQ_INIT(&asoc->nets);
1087 	TAILQ_INIT(&asoc->pending_reply_queue);
1088 	TAILQ_INIT(&asoc->asconf_ack_sent);
1089 	/* Set up to fill the hb random cache at the first HB */
1090 	asoc->hb_random_idx = 4;
1091 
1092 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1093 
1094 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1095 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1096 
1097 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1098 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1099 
1100 	/*
1101 	 * Now the stream parameters; here we allocate space for all streams
1102 	 * that we request by default.
1103 	 */
1104 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1105 	    o_strms;
1106 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1107 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1108 	    SCTP_M_STRMO);
1109 	if (asoc->strmout == NULL) {
1110 		/* big trouble no memory */
1111 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1112 		return (ENOMEM);
1113 	}
1114 	for (i = 0; i < asoc->streamoutcnt; i++) {
1115 		/*
1116 		 * The inbound side must be set to 0xffff. Also NOTE: when we
1117 		 * get the INIT-ACK back (for the INIT sender) we MUST reduce
1118 		 * the count (streamoutcnt), but first check whether we sent to
1119 		 * any of the upper streams that were dropped (if some were).
1120 		 * Those that were dropped must be reported to the upper layer
1121 		 * as having failed to send.
1122 		 */
1123 		asoc->strmout[i].next_sequence_send = 0x0;
1124 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1125 		asoc->strmout[i].chunks_on_queues = 0;
1126 #if defined(SCTP_DETAILED_STR_STATS)
1127 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1128 			asoc->strmout[i].abandoned_sent[j] = 0;
1129 			asoc->strmout[i].abandoned_unsent[j] = 0;
1130 		}
1131 #else
1132 		asoc->strmout[i].abandoned_sent[0] = 0;
1133 		asoc->strmout[i].abandoned_unsent[0] = 0;
1134 #endif
1135 		asoc->strmout[i].stream_no = i;
1136 		asoc->strmout[i].last_msg_incomplete = 0;
1137 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1138 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1139 	}
1140 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1141 
1142 	/* Now the mapping array */
1143 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1144 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1145 	    SCTP_M_MAP);
1146 	if (asoc->mapping_array == NULL) {
1147 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1148 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1149 		return (ENOMEM);
1150 	}
1151 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1152 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1153 	    SCTP_M_MAP);
1154 	if (asoc->nr_mapping_array == NULL) {
1155 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1156 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1157 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1158 		return (ENOMEM);
1159 	}
1160 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1161 
1162 	/* Now the init of the other outqueues */
1163 	TAILQ_INIT(&asoc->free_chunks);
1164 	TAILQ_INIT(&asoc->control_send_queue);
1165 	TAILQ_INIT(&asoc->asconf_send_queue);
1166 	TAILQ_INIT(&asoc->send_queue);
1167 	TAILQ_INIT(&asoc->sent_queue);
1168 	TAILQ_INIT(&asoc->resetHead);
1169 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1170 	TAILQ_INIT(&asoc->asconf_queue);
1171 	/* authentication fields */
1172 	asoc->authinfo.random = NULL;
1173 	asoc->authinfo.active_keyid = 0;
1174 	asoc->authinfo.assoc_key = NULL;
1175 	asoc->authinfo.assoc_keyid = 0;
1176 	asoc->authinfo.recv_key = NULL;
1177 	asoc->authinfo.recv_keyid = 0;
1178 	LIST_INIT(&asoc->shared_keys);
1179 	asoc->marked_retrans = 0;
1180 	asoc->port = inp->sctp_ep.port;
1181 	asoc->timoinit = 0;
1182 	asoc->timodata = 0;
1183 	asoc->timosack = 0;
1184 	asoc->timoshutdown = 0;
1185 	asoc->timoheartbeat = 0;
1186 	asoc->timocookie = 0;
1187 	asoc->timoshutdownack = 0;
1188 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1189 	asoc->discontinuity_time = asoc->start_time;
1190 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1191 		asoc->abandoned_unsent[i] = 0;
1192 		asoc->abandoned_sent[i] = 0;
1193 	}
1194 	/*
1195 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1196 	 * freed later when the association is freed.
1197 	 */
1198 	return (0);
1199 }
1200 
1201 void
1202 sctp_print_mapping_array(struct sctp_association *asoc)
1203 {
1204 	unsigned int i, limit;
1205 
1206 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1207 	    asoc->mapping_array_size,
1208 	    asoc->mapping_array_base_tsn,
1209 	    asoc->cumulative_tsn,
1210 	    asoc->highest_tsn_inside_map,
1211 	    asoc->highest_tsn_inside_nr_map);
1212 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1213 		if (asoc->mapping_array[limit - 1] != 0) {
1214 			break;
1215 		}
1216 	}
1217 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1218 	for (i = 0; i < limit; i++) {
1219 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1220 	}
1221 	if (limit % 16)
1222 		SCTP_PRINTF("\n");
1223 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1224 		if (asoc->nr_mapping_array[limit - 1]) {
1225 			break;
1226 		}
1227 	}
1228 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1229 	for (i = 0; i < limit; i++) {
1230 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1231 	}
1232 	if (limit % 16)
1233 		SCTP_PRINTF("\n");
1234 }
1235 
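/*
 * Growth arithmetic: "needed" is expressed as a number of additional TSNs
 * (bits), so a request for, say, 20 more TSNs grows both mapping arrays by
 * (20 + 7) / 8 = 3 bytes plus SCTP_MAPPING_ARRAY_INCR bytes of slack.
 */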
1236 int
1237 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1238 {
1239 	/* mapping array needs to grow */
1240 	uint8_t *new_array1, *new_array2;
1241 	uint32_t new_size;
1242 
1243 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1244 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1245 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1246 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1247 		/* can't get more, forget it */
1248 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1249 		if (new_array1) {
1250 			SCTP_FREE(new_array1, SCTP_M_MAP);
1251 		}
1252 		if (new_array2) {
1253 			SCTP_FREE(new_array2, SCTP_M_MAP);
1254 		}
1255 		return (-1);
1256 	}
1257 	memset(new_array1, 0, new_size);
1258 	memset(new_array2, 0, new_size);
1259 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1260 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1261 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1262 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1263 	asoc->mapping_array = new_array1;
1264 	asoc->nr_mapping_array = new_array2;
1265 	asoc->mapping_array_size = new_size;
1266 	return (0);
1267 }
1268 
1269 
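/*
 * Walk every endpoint (or a single one, with SCTP_ITERATOR_DO_SINGLE_INP)
 * and every association matching the requested flags/features/state,
 * invoking the caller-supplied functions. Every SCTP_ITERATOR_MAX_AT_ONCE
 * associations the INP/INFO locks are briefly dropped so other threads can
 * make progress, and the iterator control flags are re-checked afterwards.
 */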
1270 static void
1271 sctp_iterator_work(struct sctp_iterator *it)
1272 {
1273 	int iteration_count = 0;
1274 	int inp_skip = 0;
1275 	int first_in = 1;
1276 	struct sctp_inpcb *tinp;
1277 
1278 	SCTP_INP_INFO_RLOCK();
1279 	SCTP_ITERATOR_LOCK();
1280 	if (it->inp) {
1281 		SCTP_INP_RLOCK(it->inp);
1282 		SCTP_INP_DECR_REF(it->inp);
1283 	}
1284 	if (it->inp == NULL) {
1285 		/* iterator is complete */
1286 done_with_iterator:
1287 		SCTP_ITERATOR_UNLOCK();
1288 		SCTP_INP_INFO_RUNLOCK();
1289 		if (it->function_atend != NULL) {
1290 			(*it->function_atend) (it->pointer, it->val);
1291 		}
1292 		SCTP_FREE(it, SCTP_M_ITER);
1293 		return;
1294 	}
1295 select_a_new_ep:
1296 	if (first_in) {
1297 		first_in = 0;
1298 	} else {
1299 		SCTP_INP_RLOCK(it->inp);
1300 	}
1301 	while (((it->pcb_flags) &&
1302 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1303 	    ((it->pcb_features) &&
1304 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1305 		/* endpoint flags or features don't match, so keep looking */
1306 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1307 			SCTP_INP_RUNLOCK(it->inp);
1308 			goto done_with_iterator;
1309 		}
1310 		tinp = it->inp;
1311 		it->inp = LIST_NEXT(it->inp, sctp_list);
1312 		SCTP_INP_RUNLOCK(tinp);
1313 		if (it->inp == NULL) {
1314 			goto done_with_iterator;
1315 		}
1316 		SCTP_INP_RLOCK(it->inp);
1317 	}
1318 	/* now go through each assoc which is in the desired state */
1319 	if (it->done_current_ep == 0) {
1320 		if (it->function_inp != NULL)
1321 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1322 		it->done_current_ep = 1;
1323 	}
1324 	if (it->stcb == NULL) {
1325 		/* run the per instance function */
1326 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1327 	}
1328 	if ((inp_skip) || it->stcb == NULL) {
1329 		if (it->function_inp_end != NULL) {
1330 			inp_skip = (*it->function_inp_end) (it->inp,
1331 			    it->pointer,
1332 			    it->val);
1333 		}
1334 		SCTP_INP_RUNLOCK(it->inp);
1335 		goto no_stcb;
1336 	}
1337 	while (it->stcb) {
1338 		SCTP_TCB_LOCK(it->stcb);
1339 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1340 			/* not in the right state... keep looking */
1341 			SCTP_TCB_UNLOCK(it->stcb);
1342 			goto next_assoc;
1343 		}
1344 		/* see if we have limited out the iterator loop */
1345 		iteration_count++;
1346 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1347 			/* Pause to let others grab the lock */
1348 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1349 			SCTP_TCB_UNLOCK(it->stcb);
1350 			SCTP_INP_INCR_REF(it->inp);
1351 			SCTP_INP_RUNLOCK(it->inp);
1352 			SCTP_ITERATOR_UNLOCK();
1353 			SCTP_INP_INFO_RUNLOCK();
1354 			SCTP_INP_INFO_RLOCK();
1355 			SCTP_ITERATOR_LOCK();
1356 			if (sctp_it_ctl.iterator_flags) {
1357 				/* We won't be staying here */
1358 				SCTP_INP_DECR_REF(it->inp);
1359 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1360 				if (sctp_it_ctl.iterator_flags &
1361 				    SCTP_ITERATOR_STOP_CUR_IT) {
1362 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1363 					goto done_with_iterator;
1364 				}
1365 				if (sctp_it_ctl.iterator_flags &
1366 				    SCTP_ITERATOR_STOP_CUR_INP) {
1367 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1368 					goto no_stcb;
1369 				}
1370 				/* If we reach here huh? */
1371 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1372 				    sctp_it_ctl.iterator_flags);
1373 				sctp_it_ctl.iterator_flags = 0;
1374 			}
1375 			SCTP_INP_RLOCK(it->inp);
1376 			SCTP_INP_DECR_REF(it->inp);
1377 			SCTP_TCB_LOCK(it->stcb);
1378 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1379 			iteration_count = 0;
1380 		}
1381 		/* run function on this one */
1382 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1383 
1384 		/*
1385 		 * we lie here, it really needs to have its own type but
1386 		 * first I must verify that this won't effect things :-0
1387 		 */
1388 		if (it->no_chunk_output == 0)
1389 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1390 
1391 		SCTP_TCB_UNLOCK(it->stcb);
1392 next_assoc:
1393 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1394 		if (it->stcb == NULL) {
1395 			/* Run last function */
1396 			if (it->function_inp_end != NULL) {
1397 				inp_skip = (*it->function_inp_end) (it->inp,
1398 				    it->pointer,
1399 				    it->val);
1400 			}
1401 		}
1402 	}
1403 	SCTP_INP_RUNLOCK(it->inp);
1404 no_stcb:
1405 	/* done with all assocs on this endpoint, move on to next endpoint */
1406 	it->done_current_ep = 0;
1407 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1408 		it->inp = NULL;
1409 	} else {
1410 		it->inp = LIST_NEXT(it->inp, sctp_list);
1411 	}
1412 	if (it->inp == NULL) {
1413 		goto done_with_iterator;
1414 	}
1415 	goto select_a_new_ep;
1416 }
1417 
1418 void
1419 sctp_iterator_worker(void)
1420 {
1421 	struct sctp_iterator *it, *nit;
1422 
1423 	/* This function is called with the WQ lock in place */
1424 
1425 	sctp_it_ctl.iterator_running = 1;
1426 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1427 		sctp_it_ctl.cur_it = it;
1428 		/* now lets work on this one */
1429 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1430 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1431 		CURVNET_SET(it->vn);
1432 		sctp_iterator_work(it);
1433 		sctp_it_ctl.cur_it = NULL;
1434 		CURVNET_RESTORE();
1435 		SCTP_IPI_ITERATOR_WQ_LOCK();
1436 		/* sa_ignore FREED_MEMORY */
1437 	}
1438 	sctp_it_ctl.iterator_running = 0;
1439 	return;
1440 }
1441 
1442 
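/*
 * Drain the global address work queue filled in by the routing socket
 * callbacks: move all pending sctp_laddr entries onto a private list and
 * kick off an ASCONF iterator over all bound-all endpoints. If the
 * iterator cannot be started, the entries are handed to
 * sctp_asconf_iterator_end() (when the stack is shutting down) or put back
 * on the work queue.
 */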
1443 static void
1444 sctp_handle_addr_wq(void)
1445 {
1446 	/* deal with the ADDR wq from the rtsock calls */
1447 	struct sctp_laddr *wi, *nwi;
1448 	struct sctp_asconf_iterator *asc;
1449 
1450 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1451 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1452 	if (asc == NULL) {
1453 		/* Try later, no memory */
1454 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1455 		    (struct sctp_inpcb *)NULL,
1456 		    (struct sctp_tcb *)NULL,
1457 		    (struct sctp_nets *)NULL);
1458 		return;
1459 	}
1460 	LIST_INIT(&asc->list_of_work);
1461 	asc->cnt = 0;
1462 
1463 	SCTP_WQ_ADDR_LOCK();
1464 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1465 		LIST_REMOVE(wi, sctp_nxt_addr);
1466 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1467 		asc->cnt++;
1468 	}
1469 	SCTP_WQ_ADDR_UNLOCK();
1470 
1471 	if (asc->cnt == 0) {
1472 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1473 	} else {
1474 		int ret;
1475 
1476 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1477 		    sctp_asconf_iterator_stcb,
1478 		    NULL,	/* No ep end for boundall */
1479 		    SCTP_PCB_FLAGS_BOUNDALL,
1480 		    SCTP_PCB_ANY_FEATURES,
1481 		    SCTP_ASOC_ANY_STATE,
1482 		    (void *)asc, 0,
1483 		    sctp_asconf_iterator_end, NULL, 0);
1484 		if (ret) {
1485 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1486 			/*
1487 			 * Free the work if we are stopping, or put it back on
1488 			 * the addr_wq.
1489 			 */
1490 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1491 				sctp_asconf_iterator_end(asc, 0);
1492 			} else {
1493 				SCTP_WQ_ADDR_LOCK();
1494 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1495 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1496 				}
1497 				SCTP_WQ_ADDR_UNLOCK();
1498 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1499 			}
1500 		}
1501 	}
1502 }
1503 
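/*
 * Common callout handler for every SCTP timer type. Before dispatching on
 * tmr->type it validates the timer (self pointer, type, required inp/stcb/
 * net arguments), takes references on the endpoint and association, and
 * bails out if the callout was rescheduled or stopped in the meantime; the
 * per-type cases below then do the actual work and usually trigger output
 * via sctp_chunk_output().
 */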
1504 void
1505 sctp_timeout_handler(void *t)
1506 {
1507 	struct sctp_inpcb *inp;
1508 	struct sctp_tcb *stcb;
1509 	struct sctp_nets *net;
1510 	struct sctp_timer *tmr;
1511 	struct mbuf *op_err;
1512 
1513 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1514 	struct socket *so;
1515 
1516 #endif
1517 	int did_output;
1518 	int type;
1519 
1520 	tmr = (struct sctp_timer *)t;
1521 	inp = (struct sctp_inpcb *)tmr->ep;
1522 	stcb = (struct sctp_tcb *)tmr->tcb;
1523 	net = (struct sctp_nets *)tmr->net;
1524 	CURVNET_SET((struct vnet *)tmr->vnet);
1525 	did_output = 1;
1526 
1527 #ifdef SCTP_AUDITING_ENABLED
1528 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1529 	sctp_auditing(3, inp, stcb, net);
1530 #endif
1531 
1532 	/* sanity checks... */
1533 	if (tmr->self != (void *)tmr) {
1534 		/*
1535 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1536 		 * (void *)tmr);
1537 		 */
1538 		CURVNET_RESTORE();
1539 		return;
1540 	}
1541 	tmr->stopped_from = 0xa001;
1542 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1543 		/*
1544 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1545 		 * tmr->type);
1546 		 */
1547 		CURVNET_RESTORE();
1548 		return;
1549 	}
1550 	tmr->stopped_from = 0xa002;
1551 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1552 		CURVNET_RESTORE();
1553 		return;
1554 	}
1555 	/* if this is an iterator timeout, get the struct and clear inp */
1556 	tmr->stopped_from = 0xa003;
1557 	if (inp) {
1558 		SCTP_INP_INCR_REF(inp);
1559 		if ((inp->sctp_socket == NULL) &&
1560 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1561 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1562 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1563 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1564 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1565 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1566 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1567 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1568 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1569 		    ) {
1570 			SCTP_INP_DECR_REF(inp);
1571 			CURVNET_RESTORE();
1572 			return;
1573 		}
1574 	}
1575 	tmr->stopped_from = 0xa004;
1576 	if (stcb) {
1577 		atomic_add_int(&stcb->asoc.refcnt, 1);
1578 		if (stcb->asoc.state == 0) {
1579 			atomic_add_int(&stcb->asoc.refcnt, -1);
1580 			if (inp) {
1581 				SCTP_INP_DECR_REF(inp);
1582 			}
1583 			CURVNET_RESTORE();
1584 			return;
1585 		}
1586 	}
1587 	type = tmr->type;
1588 	tmr->stopped_from = 0xa005;
1589 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1590 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1591 		if (inp) {
1592 			SCTP_INP_DECR_REF(inp);
1593 		}
1594 		if (stcb) {
1595 			atomic_add_int(&stcb->asoc.refcnt, -1);
1596 		}
1597 		CURVNET_RESTORE();
1598 		return;
1599 	}
1600 	tmr->stopped_from = 0xa006;
1601 
1602 	if (stcb) {
1603 		SCTP_TCB_LOCK(stcb);
1604 		atomic_add_int(&stcb->asoc.refcnt, -1);
1605 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1606 		    ((stcb->asoc.state == 0) ||
1607 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1608 			SCTP_TCB_UNLOCK(stcb);
1609 			if (inp) {
1610 				SCTP_INP_DECR_REF(inp);
1611 			}
1612 			CURVNET_RESTORE();
1613 			return;
1614 		}
1615 	}
1616 	/* record in stopped_from which timeout occurred */
1617 	tmr->stopped_from = type;
1618 
1619 	/* mark as being serviced now */
1620 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1621 		/*
1622 		 * Callout has been rescheduled.
1623 		 */
1624 		goto get_out;
1625 	}
1626 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1627 		/*
1628 		 * Not active, so no action.
1629 		 */
1630 		goto get_out;
1631 	}
1632 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1633 
1634 	/* call the handler for the appropriate timer type */
1635 	switch (type) {
1636 	case SCTP_TIMER_TYPE_ZERO_COPY:
1637 		if (inp == NULL) {
1638 			break;
1639 		}
1640 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1641 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1642 		}
1643 		break;
1644 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1645 		if (inp == NULL) {
1646 			break;
1647 		}
1648 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1649 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1650 		}
1651 		break;
1652 	case SCTP_TIMER_TYPE_ADDR_WQ:
1653 		sctp_handle_addr_wq();
1654 		break;
1655 	case SCTP_TIMER_TYPE_SEND:
1656 		if ((stcb == NULL) || (inp == NULL)) {
1657 			break;
1658 		}
1659 		SCTP_STAT_INCR(sctps_timodata);
1660 		stcb->asoc.timodata++;
1661 		stcb->asoc.num_send_timers_up--;
1662 		if (stcb->asoc.num_send_timers_up < 0) {
1663 			stcb->asoc.num_send_timers_up = 0;
1664 		}
1665 		SCTP_TCB_LOCK_ASSERT(stcb);
1666 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1667 			/* no need to unlock the tcb, it's gone */
1668 
1669 			goto out_decr;
1670 		}
1671 		SCTP_TCB_LOCK_ASSERT(stcb);
1672 #ifdef SCTP_AUDITING_ENABLED
1673 		sctp_auditing(4, inp, stcb, net);
1674 #endif
1675 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1676 		if ((stcb->asoc.num_send_timers_up == 0) &&
1677 		    (stcb->asoc.sent_queue_cnt > 0)) {
1678 			struct sctp_tmit_chunk *chk;
1679 
1680 			/*
1681 			 * safeguard. If there are chunks on the sent queue
1682 			 * but no timers running, something is wrong... so we
1683 			 * start a timer on the first chunk of the sent queue
1684 			 * on whatever net it is being sent to.
1685 			 */
1686 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1687 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1688 			    chk->whoTo);
1689 		}
1690 		break;
1691 	case SCTP_TIMER_TYPE_INIT:
1692 		if ((stcb == NULL) || (inp == NULL)) {
1693 			break;
1694 		}
1695 		SCTP_STAT_INCR(sctps_timoinit);
1696 		stcb->asoc.timoinit++;
1697 		if (sctp_t1init_timer(inp, stcb, net)) {
1698 			/* no need to unlock on tcb its gone */
1699 			goto out_decr;
1700 		}
1701 		/* We do output but not here */
1702 		did_output = 0;
1703 		break;
1704 	case SCTP_TIMER_TYPE_RECV:
1705 		if ((stcb == NULL) || (inp == NULL)) {
1706 			break;
1707 		}
1708 		SCTP_STAT_INCR(sctps_timosack);
1709 		stcb->asoc.timosack++;
1710 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1711 #ifdef SCTP_AUDITING_ENABLED
1712 		sctp_auditing(4, inp, stcb, net);
1713 #endif
1714 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1715 		break;
1716 	case SCTP_TIMER_TYPE_SHUTDOWN:
1717 		if ((stcb == NULL) || (inp == NULL)) {
1718 			break;
1719 		}
1720 		if (sctp_shutdown_timer(inp, stcb, net)) {
1721 			/* no need to unlock the tcb, it's gone */
1722 			goto out_decr;
1723 		}
1724 		SCTP_STAT_INCR(sctps_timoshutdown);
1725 		stcb->asoc.timoshutdown++;
1726 #ifdef SCTP_AUDITING_ENABLED
1727 		sctp_auditing(4, inp, stcb, net);
1728 #endif
1729 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1730 		break;
1731 	case SCTP_TIMER_TYPE_HEARTBEAT:
1732 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1733 			break;
1734 		}
1735 		SCTP_STAT_INCR(sctps_timoheartbeat);
1736 		stcb->asoc.timoheartbeat++;
1737 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1738 			/* no need to unlock the tcb, it's gone */
1739 			goto out_decr;
1740 		}
1741 #ifdef SCTP_AUDITING_ENABLED
1742 		sctp_auditing(4, inp, stcb, net);
1743 #endif
1744 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1745 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1746 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1747 		}
1748 		break;
1749 	case SCTP_TIMER_TYPE_COOKIE:
1750 		if ((stcb == NULL) || (inp == NULL)) {
1751 			break;
1752 		}
1753 		if (sctp_cookie_timer(inp, stcb, net)) {
1754 			/* no need to unlock on tcb, it's gone */
1755 			goto out_decr;
1756 		}
1757 		SCTP_STAT_INCR(sctps_timocookie);
1758 		stcb->asoc.timocookie++;
1759 #ifdef SCTP_AUDITING_ENABLED
1760 		sctp_auditing(4, inp, stcb, net);
1761 #endif
1762 		/*
1763 		 * We consider the T3 and Cookie timers pretty much the same
1764 		 * with respect to the 'from' argument passed to chunk_output.
1765 		 */
1766 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1767 		break;
1768 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1769 		{
1770 			struct timeval tv;
1771 			int i, secret;
1772 
1773 			if (inp == NULL) {
1774 				break;
1775 			}
1776 			SCTP_STAT_INCR(sctps_timosecret);
1777 			(void)SCTP_GETTIME_TIMEVAL(&tv);
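			/*
			 * Rotate the cookie secret: remember the previous
			 * secret number and generate a fresh key for the
			 * new current secret.
			 */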
1778 			SCTP_INP_WLOCK(inp);
1779 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1780 			inp->sctp_ep.last_secret_number =
1781 			    inp->sctp_ep.current_secret_number;
1782 			inp->sctp_ep.current_secret_number++;
1783 			if (inp->sctp_ep.current_secret_number >=
1784 			    SCTP_HOW_MANY_SECRETS) {
1785 				inp->sctp_ep.current_secret_number = 0;
1786 			}
1787 			secret = (int)inp->sctp_ep.current_secret_number;
1788 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1789 				inp->sctp_ep.secret_key[secret][i] =
1790 				    sctp_select_initial_TSN(&inp->sctp_ep);
1791 			}
1792 			SCTP_INP_WUNLOCK(inp);
1793 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1794 		}
1795 		did_output = 0;
1796 		break;
1797 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1798 		if ((stcb == NULL) || (inp == NULL)) {
1799 			break;
1800 		}
1801 		SCTP_STAT_INCR(sctps_timopathmtu);
1802 		sctp_pathmtu_timer(inp, stcb, net);
1803 		did_output = 0;
1804 		break;
1805 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1806 		if ((stcb == NULL) || (inp == NULL)) {
1807 			break;
1808 		}
1809 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1810 			/* no need to unlock on tcb, it's gone */
1811 			goto out_decr;
1812 		}
1813 		SCTP_STAT_INCR(sctps_timoshutdownack);
1814 		stcb->asoc.timoshutdownack++;
1815 #ifdef SCTP_AUDITING_ENABLED
1816 		sctp_auditing(4, inp, stcb, net);
1817 #endif
1818 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1819 		break;
1820 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1821 		if ((stcb == NULL) || (inp == NULL)) {
1822 			break;
1823 		}
1824 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1825 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1826 		    "Shutdown guard timer expired");
1827 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1828 		/* no need to unlock on tcb, it's gone */
1829 		goto out_decr;
1830 
1831 	case SCTP_TIMER_TYPE_STRRESET:
1832 		if ((stcb == NULL) || (inp == NULL)) {
1833 			break;
1834 		}
1835 		if (sctp_strreset_timer(inp, stcb, net)) {
1836 			/* no need to unlock on tcb, it's gone */
1837 			goto out_decr;
1838 		}
1839 		SCTP_STAT_INCR(sctps_timostrmrst);
1840 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1841 		break;
1842 	case SCTP_TIMER_TYPE_ASCONF:
1843 		if ((stcb == NULL) || (inp == NULL)) {
1844 			break;
1845 		}
1846 		if (sctp_asconf_timer(inp, stcb, net)) {
1847 			/* no need to unlock on tcb, it's gone */
1848 			goto out_decr;
1849 		}
1850 		SCTP_STAT_INCR(sctps_timoasconf);
1851 #ifdef SCTP_AUDITING_ENABLED
1852 		sctp_auditing(4, inp, stcb, net);
1853 #endif
1854 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1855 		break;
1856 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1857 		if ((stcb == NULL) || (inp == NULL)) {
1858 			break;
1859 		}
1860 		sctp_delete_prim_timer(inp, stcb, net);
1861 		SCTP_STAT_INCR(sctps_timodelprim);
1862 		break;
1863 
1864 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1865 		if ((stcb == NULL) || (inp == NULL)) {
1866 			break;
1867 		}
1868 		SCTP_STAT_INCR(sctps_timoautoclose);
1869 		sctp_autoclose_timer(inp, stcb, net);
1870 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1871 		did_output = 0;
1872 		break;
1873 	case SCTP_TIMER_TYPE_ASOCKILL:
1874 		if ((stcb == NULL) || (inp == NULL)) {
1875 			break;
1876 		}
1877 		SCTP_STAT_INCR(sctps_timoassockill);
1878 		/* Can we free it yet? */
1879 		SCTP_INP_DECR_REF(inp);
1880 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1881 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1882 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1883 		so = SCTP_INP_SO(inp);
1884 		atomic_add_int(&stcb->asoc.refcnt, 1);
1885 		SCTP_TCB_UNLOCK(stcb);
1886 		SCTP_SOCKET_LOCK(so, 1);
1887 		SCTP_TCB_LOCK(stcb);
1888 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1889 #endif
1890 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1891 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1892 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1893 		SCTP_SOCKET_UNLOCK(so, 1);
1894 #endif
1895 		/*
1896 		 * free asoc always unlocks (or destroys) the lock, so prevent a
1897 		 * duplicate unlock or an unlock of a freed mtx :-0
1898 		 */
1899 		stcb = NULL;
1900 		goto out_no_decr;
1901 	case SCTP_TIMER_TYPE_INPKILL:
1902 		SCTP_STAT_INCR(sctps_timoinpkill);
1903 		if (inp == NULL) {
1904 			break;
1905 		}
1906 		/*
1907 		 * special case, take away our increment since WE are the
1908 		 * killer
1909 		 */
1910 		SCTP_INP_DECR_REF(inp);
1911 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1912 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1913 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1914 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1915 		inp = NULL;
1916 		goto out_no_decr;
1917 	default:
1918 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1919 		    type);
1920 		break;
1921 	}
1922 #ifdef SCTP_AUDITING_ENABLED
1923 	sctp_audit_log(0xF1, (uint8_t) type);
1924 	if (inp)
1925 		sctp_auditing(5, inp, stcb, net);
1926 #endif
1927 	if ((did_output) && stcb) {
1928 		/*
1929 		 * Now we need to clean up the control chunk chain if an
1930 		 * ECNE is on it. It must be marked as UNSENT again so the
1931 		 * next call will continue to send it until we get a CWR
1932 		 * to remove it. It is, however, unlikely that we will
1933 		 * find an ECN echo on the chain.
1934 		 */
1935 		sctp_fix_ecn_echo(&stcb->asoc);
1936 	}
1937 get_out:
1938 	if (stcb) {
1939 		SCTP_TCB_UNLOCK(stcb);
1940 	}
1941 out_decr:
1942 	if (inp) {
1943 		SCTP_INP_DECR_REF(inp);
1944 	}
1945 out_no_decr:
1946 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
1947 	CURVNET_RESTORE();
1948 }
1949 
1950 void
1951 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1952     struct sctp_nets *net)
1953 {
1954 	uint32_t to_ticks;
1955 	struct sctp_timer *tmr;
1956 
1957 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1958 		return;
1959 
1960 	tmr = NULL;
1961 	if (stcb) {
1962 		SCTP_TCB_LOCK_ASSERT(stcb);
1963 	}
1964 	switch (t_type) {
1965 	case SCTP_TIMER_TYPE_ZERO_COPY:
1966 		tmr = &inp->sctp_ep.zero_copy_timer;
1967 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1968 		break;
1969 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1970 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1971 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1972 		break;
1973 	case SCTP_TIMER_TYPE_ADDR_WQ:
1974 		/* Only 1 tick away :-) */
1975 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1976 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1977 		break;
1978 	case SCTP_TIMER_TYPE_SEND:
1979 		/* Here we use the RTO timer */
1980 		{
1981 			int rto_val;
1982 
1983 			if ((stcb == NULL) || (net == NULL)) {
1984 				return;
1985 			}
1986 			tmr = &net->rxt_timer;
1987 			if (net->RTO == 0) {
1988 				rto_val = stcb->asoc.initial_rto;
1989 			} else {
1990 				rto_val = net->RTO;
1991 			}
1992 			to_ticks = MSEC_TO_TICKS(rto_val);
1993 		}
1994 		break;
1995 	case SCTP_TIMER_TYPE_INIT:
1996 		/*
1997 		 * Here we use the INIT timer default, usually about 1
1998 		 * minute.
1999 		 */
2000 		if ((stcb == NULL) || (net == NULL)) {
2001 			return;
2002 		}
2003 		tmr = &net->rxt_timer;
2004 		if (net->RTO == 0) {
2005 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2006 		} else {
2007 			to_ticks = MSEC_TO_TICKS(net->RTO);
2008 		}
2009 		break;
2010 	case SCTP_TIMER_TYPE_RECV:
2011 		/*
2012 		 * Here we use the Delayed-Ack timer value from the inp,
2013 		 * usually about 200 ms.
2014 		 */
2015 		if (stcb == NULL) {
2016 			return;
2017 		}
2018 		tmr = &stcb->asoc.dack_timer;
2019 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2020 		break;
2021 	case SCTP_TIMER_TYPE_SHUTDOWN:
2022 		/* Here we use the RTO of the destination. */
2023 		if ((stcb == NULL) || (net == NULL)) {
2024 			return;
2025 		}
2026 		if (net->RTO == 0) {
2027 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2028 		} else {
2029 			to_ticks = MSEC_TO_TICKS(net->RTO);
2030 		}
2031 		tmr = &net->rxt_timer;
2032 		break;
2033 	case SCTP_TIMER_TYPE_HEARTBEAT:
2034 		/*
2035 		 * The net is used here so that we can add in the RTO, even
2036 		 * though we use a different timer. We also add the HB delay
2037 		 * PLUS a random jitter.
2038 		 */
2039 		if ((stcb == NULL) || (net == NULL)) {
2040 			return;
2041 		} else {
2042 			uint32_t rndval;
2043 			uint32_t jitter;
2044 
2045 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2046 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2047 				return;
2048 			}
2049 			if (net->RTO == 0) {
2050 				to_ticks = stcb->asoc.initial_rto;
2051 			} else {
2052 				to_ticks = net->RTO;
2053 			}
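			/*
			 * Jitter the RTO component by +/- 50% (cf. RFC 4960,
			 * Section 8.3) before adding the configured heartbeat
			 * delay below.
			 */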
2054 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2055 			jitter = rndval % to_ticks;
2056 			if (jitter >= (to_ticks >> 1)) {
2057 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2058 			} else {
2059 				to_ticks = to_ticks - jitter;
2060 			}
2061 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2062 			    !(net->dest_state & SCTP_ADDR_PF)) {
2063 				to_ticks += net->heart_beat_delay;
2064 			}
2065 			/*
2066 			 * Now we must convert to_ticks, which is currently
2067 			 * in ms, to ticks.
2068 			 */
2069 			to_ticks = MSEC_TO_TICKS(to_ticks);
2070 			tmr = &net->hb_timer;
2071 		}
2072 		break;
2073 	case SCTP_TIMER_TYPE_COOKIE:
2074 		/*
2075 		 * Here we can use the RTO timer from the network since one
2076 		 * RTT was complete. If a retransmission happened then we
2077 		 * will be using the RTO initial value.
2078 		 */
2079 		if ((stcb == NULL) || (net == NULL)) {
2080 			return;
2081 		}
2082 		if (net->RTO == 0) {
2083 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2084 		} else {
2085 			to_ticks = MSEC_TO_TICKS(net->RTO);
2086 		}
2087 		tmr = &net->rxt_timer;
2088 		break;
2089 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2090 		/*
2091 		 * Nothing needed but the endpoint here; usually about 60
2092 		 * minutes.
2093 		 */
2094 		tmr = &inp->sctp_ep.signature_change;
2095 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2096 		break;
2097 	case SCTP_TIMER_TYPE_ASOCKILL:
2098 		if (stcb == NULL) {
2099 			return;
2100 		}
2101 		tmr = &stcb->asoc.strreset_timer;
2102 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2103 		break;
2104 	case SCTP_TIMER_TYPE_INPKILL:
2105 		/*
2106 		 * The inp is set up to die. We re-use the signature_change
2107 		 * timer since that has stopped and we are in the GONE
2108 		 * state.
2109 		 */
2110 		tmr = &inp->sctp_ep.signature_change;
2111 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2112 		break;
2113 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2114 		/*
2115 		 * Here we use the value found in the EP for PMTU, usually
2116 		 * about 10 minutes.
2117 		 */
2118 		if ((stcb == NULL) || (net == NULL)) {
2119 			return;
2120 		}
2121 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2122 			return;
2123 		}
2124 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2125 		tmr = &net->pmtu_timer;
2126 		break;
2127 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2128 		/* Here we use the RTO of the destination */
2129 		if ((stcb == NULL) || (net == NULL)) {
2130 			return;
2131 		}
2132 		if (net->RTO == 0) {
2133 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2134 		} else {
2135 			to_ticks = MSEC_TO_TICKS(net->RTO);
2136 		}
2137 		tmr = &net->rxt_timer;
2138 		break;
2139 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2140 		/*
2141 		 * Here we use the endpoint's shutdown guard timer, usually
2142 		 * about 3 minutes.
2143 		 */
2144 		if (stcb == NULL) {
2145 			return;
2146 		}
2147 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2148 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2149 		} else {
2150 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2151 		}
2152 		tmr = &stcb->asoc.shut_guard_timer;
2153 		break;
2154 	case SCTP_TIMER_TYPE_STRRESET:
2155 		/*
2156 		 * Here the timer comes from the stcb but its value is from
2157 		 * the net's RTO.
2158 		 */
2159 		if ((stcb == NULL) || (net == NULL)) {
2160 			return;
2161 		}
2162 		if (net->RTO == 0) {
2163 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2164 		} else {
2165 			to_ticks = MSEC_TO_TICKS(net->RTO);
2166 		}
2167 		tmr = &stcb->asoc.strreset_timer;
2168 		break;
2169 	case SCTP_TIMER_TYPE_ASCONF:
2170 		/*
2171 		 * Here the timer comes from the stcb but its value is from
2172 		 * the net's RTO.
2173 		 */
2174 		if ((stcb == NULL) || (net == NULL)) {
2175 			return;
2176 		}
2177 		if (net->RTO == 0) {
2178 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2179 		} else {
2180 			to_ticks = MSEC_TO_TICKS(net->RTO);
2181 		}
2182 		tmr = &stcb->asoc.asconf_timer;
2183 		break;
2184 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2185 		if ((stcb == NULL) || (net != NULL)) {
2186 			return;
2187 		}
2188 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2189 		tmr = &stcb->asoc.delete_prim_timer;
2190 		break;
2191 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2192 		if (stcb == NULL) {
2193 			return;
2194 		}
2195 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2196 			/*
2197 			 * Really an error since stcb is NOT set to
2198 			 * autoclose
2199 			 */
2200 			return;
2201 		}
2202 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2203 		tmr = &stcb->asoc.autoclose_timer;
2204 		break;
2205 	default:
2206 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2207 		    __func__, t_type);
2208 		return;
2209 		break;
2210 	}
2211 	if ((to_ticks <= 0) || (tmr == NULL)) {
2212 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2213 		    __func__, t_type, to_ticks, (void *)tmr);
2214 		return;
2215 	}
2216 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2217 		/*
2218 		 * We do NOT allow you to have it already running. If it is,
2219 		 * we leave the current one up unchanged.
2220 		 */
2221 		return;
2222 	}
2223 	/* At this point we can proceed */
2224 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2225 		stcb->asoc.num_send_timers_up++;
2226 	}
2227 	tmr->stopped_from = 0;
2228 	tmr->type = t_type;
2229 	tmr->ep = (void *)inp;
2230 	tmr->tcb = (void *)stcb;
2231 	tmr->net = (void *)net;
2232 	tmr->self = (void *)tmr;
2233 	tmr->vnet = (void *)curvnet;
2234 	tmr->ticks = sctp_get_tick_count();
2235 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2236 	return;
2237 }
2238 
2239 void
2240 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2241     struct sctp_nets *net, uint32_t from)
2242 {
2243 	struct sctp_timer *tmr;
2244 
2245 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2246 	    (inp == NULL))
2247 		return;
2248 
2249 	tmr = NULL;
2250 	if (stcb) {
2251 		SCTP_TCB_LOCK_ASSERT(stcb);
2252 	}
2253 	switch (t_type) {
2254 	case SCTP_TIMER_TYPE_ZERO_COPY:
2255 		tmr = &inp->sctp_ep.zero_copy_timer;
2256 		break;
2257 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2258 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2259 		break;
2260 	case SCTP_TIMER_TYPE_ADDR_WQ:
2261 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2262 		break;
2263 	case SCTP_TIMER_TYPE_SEND:
2264 		if ((stcb == NULL) || (net == NULL)) {
2265 			return;
2266 		}
2267 		tmr = &net->rxt_timer;
2268 		break;
2269 	case SCTP_TIMER_TYPE_INIT:
2270 		if ((stcb == NULL) || (net == NULL)) {
2271 			return;
2272 		}
2273 		tmr = &net->rxt_timer;
2274 		break;
2275 	case SCTP_TIMER_TYPE_RECV:
2276 		if (stcb == NULL) {
2277 			return;
2278 		}
2279 		tmr = &stcb->asoc.dack_timer;
2280 		break;
2281 	case SCTP_TIMER_TYPE_SHUTDOWN:
2282 		if ((stcb == NULL) || (net == NULL)) {
2283 			return;
2284 		}
2285 		tmr = &net->rxt_timer;
2286 		break;
2287 	case SCTP_TIMER_TYPE_HEARTBEAT:
2288 		if ((stcb == NULL) || (net == NULL)) {
2289 			return;
2290 		}
2291 		tmr = &net->hb_timer;
2292 		break;
2293 	case SCTP_TIMER_TYPE_COOKIE:
2294 		if ((stcb == NULL) || (net == NULL)) {
2295 			return;
2296 		}
2297 		tmr = &net->rxt_timer;
2298 		break;
2299 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2300 		/* nothing needed but the endpoint here */
2301 		tmr = &inp->sctp_ep.signature_change;
2302 		/*
2303 		 * We re-use the newcookie timer for the INP kill timer. We
2304 		 * must ensure that we do not kill it by accident.
2305 		 */
2306 		break;
2307 	case SCTP_TIMER_TYPE_ASOCKILL:
2308 		/*
2309 		 * Stop the asoc kill timer.
2310 		 */
2311 		if (stcb == NULL) {
2312 			return;
2313 		}
2314 		tmr = &stcb->asoc.strreset_timer;
2315 		break;
2316 
2317 	case SCTP_TIMER_TYPE_INPKILL:
2318 		/*
2319 		 * The inp is set up to die. We re-use the signature_change
2320 		 * timer since that has stopped and we are in the GONE
2321 		 * state.
2322 		 */
2323 		tmr = &inp->sctp_ep.signature_change;
2324 		break;
2325 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2326 		if ((stcb == NULL) || (net == NULL)) {
2327 			return;
2328 		}
2329 		tmr = &net->pmtu_timer;
2330 		break;
2331 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2332 		if ((stcb == NULL) || (net == NULL)) {
2333 			return;
2334 		}
2335 		tmr = &net->rxt_timer;
2336 		break;
2337 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2338 		if (stcb == NULL) {
2339 			return;
2340 		}
2341 		tmr = &stcb->asoc.shut_guard_timer;
2342 		break;
2343 	case SCTP_TIMER_TYPE_STRRESET:
2344 		if (stcb == NULL) {
2345 			return;
2346 		}
2347 		tmr = &stcb->asoc.strreset_timer;
2348 		break;
2349 	case SCTP_TIMER_TYPE_ASCONF:
2350 		if (stcb == NULL) {
2351 			return;
2352 		}
2353 		tmr = &stcb->asoc.asconf_timer;
2354 		break;
2355 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2356 		if (stcb == NULL) {
2357 			return;
2358 		}
2359 		tmr = &stcb->asoc.delete_prim_timer;
2360 		break;
2361 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2362 		if (stcb == NULL) {
2363 			return;
2364 		}
2365 		tmr = &stcb->asoc.autoclose_timer;
2366 		break;
2367 	default:
2368 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2369 		    __func__, t_type);
2370 		break;
2371 	}
2372 	if (tmr == NULL) {
2373 		return;
2374 	}
2375 	if ((tmr->type != t_type) && tmr->type) {
2376 		/*
2377 		 * OK, we have a timer that is under joint use, perhaps the
2378 		 * Cookie timer sharing state with the SEND timer. We are
2379 		 * therefore NOT running the timer that the caller wants
2380 		 * stopped, so just return.
2381 		 */
2382 		return;
2383 	}
2384 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2385 		stcb->asoc.num_send_timers_up--;
2386 		if (stcb->asoc.num_send_timers_up < 0) {
2387 			stcb->asoc.num_send_timers_up = 0;
2388 		}
2389 	}
2390 	tmr->self = NULL;
2391 	tmr->stopped_from = from;
2392 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2393 	return;
2394 }
2395 
2396 uint32_t
2397 sctp_calculate_len(struct mbuf *m)
2398 {
2399 	uint32_t tlen = 0;
2400 	struct mbuf *at;
2401 
2402 	at = m;
2403 	while (at) {
2404 		tlen += SCTP_BUF_LEN(at);
2405 		at = SCTP_BUF_NEXT(at);
2406 	}
2407 	return (tlen);
2408 }
2409 
2410 void
2411 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2412     struct sctp_association *asoc, uint32_t mtu)
2413 {
2414 	/*
2415 	 * Reset the P-MTU size on this association. This involves changing
2416 	 * the asoc MTU and going through ANY chunk+overhead larger than mtu
2417 	 * to allow the DF flag to be cleared.
2418 	 */
2419 	struct sctp_tmit_chunk *chk;
2420 	unsigned int eff_mtu, ovh;
2421 
2422 	asoc->smallest_mtu = mtu;
2423 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2424 		ovh = SCTP_MIN_OVERHEAD;
2425 	} else {
2426 		ovh = SCTP_MIN_V4_OVERHEAD;
2427 	}
2428 	eff_mtu = mtu - ovh;
2429 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2430 		if (chk->send_size > eff_mtu) {
2431 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2432 		}
2433 	}
2434 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2435 		if (chk->send_size > eff_mtu) {
2436 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2437 		}
2438 	}
2439 }
2440 
2441 
2442 /*
2443  * Given an association and the starting time of the current RTT period,
2444  * return the RTO in number of msecs. net should point to the current network.
2445  */
2446 
2447 uint32_t
2448 sctp_calculate_rto(struct sctp_tcb *stcb,
2449     struct sctp_association *asoc,
2450     struct sctp_nets *net,
2451     struct timeval *told,
2452     int safe, int rtt_from_sack)
2453 {
2454 	/*-
2455 	 * Given an association and the starting time of the current RTT
2456 	 * period (in *told), return the RTO in number of msecs.
2457 	 */
2458 	int32_t rtt;		/* RTT in ms */
2459 	uint32_t new_rto;
2460 	int first_measure = 0;
2461 	struct timeval now, then, *old;
2462 
2463 	/* Copy it out for sparc64 */
2464 	if (safe == sctp_align_unsafe_makecopy) {
2465 		old = &then;
2466 		memcpy(&then, told, sizeof(struct timeval));
2467 	} else if (safe == sctp_align_safe_nocopy) {
2468 		old = told;
2469 	} else {
2470 		/* error */
2471 		SCTP_PRINTF("Huh, bad rto calc call\n");
2472 		return (0);
2473 	}
2474 	/************************/
2475 	/* 1. calculate new RTT */
2476 	/************************/
2477 	/* get the current time */
2478 	if (stcb->asoc.use_precise_time) {
2479 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2480 	} else {
2481 		(void)SCTP_GETTIME_TIMEVAL(&now);
2482 	}
2483 	timevalsub(&now, old);
2484 	/* store the current RTT in us */
2485 	net->rtt = (uint64_t)1000000 * (uint64_t)now.tv_sec +
2486 	    (uint64_t)now.tv_usec;
2487 
2488 	/* compute rtt in ms */
2489 	rtt = (int32_t) (net->rtt / 1000);
2490 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2491 		/*
2492 		 * Tell the CC module that a new update has just occurred
2493 		 * from a sack
2494 		 */
2495 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2496 	}
2497 	/*
2498 	 * Do we need to determine the LAN type? We do this only on SACKs,
2499 	 * i.e. RTT being determined from data, not non-data (HB/INIT->INIT-ACK).
2500 	 */
2501 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2502 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2503 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2504 			net->lan_type = SCTP_LAN_INTERNET;
2505 		} else {
2506 			net->lan_type = SCTP_LAN_LOCAL;
2507 		}
2508 	}
2509 	/***************************/
2510 	/* 2. update RTTVAR & SRTT */
2511 	/***************************/
2512 	/*-
2513 	 * Compute the scaled average lastsa and the
2514 	 * scaled variance lastsv as described in Van Jacobson's
2515 	 * paper "Congestion Avoidance and Control", Annex A.
2516 	 *
2517 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2518 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2519 	 */
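	/*
	 * In unscaled terms this follows RFC 4960, Section 6.3.1:
	 *   RTTVAR = (1 - RTO.Beta)  * RTTVAR + RTO.Beta  * |SRTT - R'|
	 *   SRTT   = (1 - RTO.Alpha) * SRTT   + RTO.Alpha * R'
	 *   RTO    = SRTT + 4 * RTTVAR
	 */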
2520 	if (net->RTO_measured) {
2521 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2522 		net->lastsa += rtt;
2523 		if (rtt < 0) {
2524 			rtt = -rtt;
2525 		}
2526 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2527 		net->lastsv += rtt;
2528 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2529 			rto_logging(net, SCTP_LOG_RTTVAR);
2530 		}
2531 	} else {
2532 		/* First RTO measurement */
2533 		net->RTO_measured = 1;
2534 		first_measure = 1;
2535 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2536 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2537 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2538 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2539 		}
2540 	}
2541 	if (net->lastsv == 0) {
2542 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2543 	}
2544 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2545 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2546 	    (stcb->asoc.sat_network_lockout == 0)) {
2547 		stcb->asoc.sat_network = 1;
2548 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2549 		stcb->asoc.sat_network = 0;
2550 		stcb->asoc.sat_network_lockout = 1;
2551 	}
2552 	/* bound it, per C6/C7 in Section 5.3.1 */
2553 	if (new_rto < stcb->asoc.minrto) {
2554 		new_rto = stcb->asoc.minrto;
2555 	}
2556 	if (new_rto > stcb->asoc.maxrto) {
2557 		new_rto = stcb->asoc.maxrto;
2558 	}
2559 	/* we are now returning the RTO */
2560 	return (new_rto);
2561 }
2562 
2563 /*
2564  * Return a pointer to a contiguous piece of data from the given mbuf chain
2565  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2566  * one mbuf, a copy is made into 'in_ptr'. The caller must ensure that the
2567  * buffer size is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
2568  */
2569 caddr_t
2570 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2571 {
2572 	uint32_t count;
2573 	uint8_t *ptr;
2574 
2575 	ptr = in_ptr;
2576 	if ((off < 0) || (len <= 0))
2577 		return (NULL);
2578 
2579 	/* find the desired start location */
2580 	while ((m != NULL) && (off > 0)) {
2581 		if (off < SCTP_BUF_LEN(m))
2582 			break;
2583 		off -= SCTP_BUF_LEN(m);
2584 		m = SCTP_BUF_NEXT(m);
2585 	}
2586 	if (m == NULL)
2587 		return (NULL);
2588 
2589 	/* is the current mbuf large enough (eg. contiguous)? */
2590 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2591 		return (mtod(m, caddr_t)+off);
2592 	} else {
2593 		/* else, it spans more than one mbuf, so save a temp copy... */
2594 		while ((m != NULL) && (len > 0)) {
2595 			count = min(SCTP_BUF_LEN(m) - off, len);
2596 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2597 			len -= count;
2598 			ptr += count;
2599 			off = 0;
2600 			m = SCTP_BUF_NEXT(m);
2601 		}
2602 		if ((m == NULL) && (len > 0))
2603 			return (NULL);
2604 		else
2605 			return ((caddr_t)in_ptr);
2606 	}
2607 }
2608 
2609 
2610 
2611 struct sctp_paramhdr *
2612 sctp_get_next_param(struct mbuf *m,
2613     int offset,
2614     struct sctp_paramhdr *pull,
2615     int pull_limit)
2616 {
2617 	/* This just provides a typed signature to Peter's Pull routine */
2618 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2619 	    (uint8_t *) pull));
2620 }
2621 
2622 
2623 struct mbuf *
2624 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2625 {
2626 	struct mbuf *m_last;
2627 	caddr_t dp;
2628 
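	/*
	 * SCTP chunks are padded out to a multiple of 4 bytes, so at most
	 * 3 bytes of padding can ever be required.
	 */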
2629 	if (padlen > 3) {
2630 		return (NULL);
2631 	}
2632 	if (padlen <= M_TRAILINGSPACE(m)) {
2633 		/*
2634 		 * The easy way. We hope the majority of the time we hit
2635 		 * here :)
2636 		 */
2637 		m_last = m;
2638 	} else {
2639 		/* Hard way we must grow the mbuf chain */
2640 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2641 		if (m_last == NULL) {
2642 			return (NULL);
2643 		}
2644 		SCTP_BUF_LEN(m_last) = 0;
2645 		SCTP_BUF_NEXT(m_last) = NULL;
2646 		SCTP_BUF_NEXT(m) = m_last;
2647 	}
2648 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2649 	SCTP_BUF_LEN(m_last) += padlen;
2650 	memset(dp, 0, padlen);
2651 	return (m_last);
2652 }
2653 
2654 struct mbuf *
2655 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2656 {
2657 	/* find the last mbuf in chain and pad it */
2658 	struct mbuf *m_at;
2659 
2660 	if (last_mbuf != NULL) {
2661 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2662 	} else {
2663 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2664 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2665 				return (sctp_add_pad_tombuf(m_at, padval));
2666 			}
2667 		}
2668 	}
2669 	return (NULL);
2670 }
2671 
2672 static void
2673 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2674     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2675 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2676     SCTP_UNUSED
2677 #endif
2678 )
2679 {
2680 	struct mbuf *m_notify;
2681 	struct sctp_assoc_change *sac;
2682 	struct sctp_queued_to_read *control;
2683 	unsigned int notif_len;
2684 	uint16_t abort_len;
2685 	unsigned int i;
2686 
2687 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2688 	struct socket *so;
2689 
2690 #endif
2691 
2692 	if (stcb == NULL) {
2693 		return;
2694 	}
2695 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2696 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2697 		if (abort != NULL) {
2698 			abort_len = ntohs(abort->ch.chunk_length);
2699 		} else {
2700 			abort_len = 0;
2701 		}
2702 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2703 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2704 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2705 			notif_len += abort_len;
2706 		}
2707 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2708 		if (m_notify == NULL) {
2709 			/* Retry with smaller value. */
2710 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2711 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2712 			if (m_notify == NULL) {
2713 				goto set_error;
2714 			}
2715 		}
2716 		SCTP_BUF_NEXT(m_notify) = NULL;
2717 		sac = mtod(m_notify, struct sctp_assoc_change *);
2718 		memset(sac, 0, notif_len);
2719 		sac->sac_type = SCTP_ASSOC_CHANGE;
2720 		sac->sac_flags = 0;
2721 		sac->sac_length = sizeof(struct sctp_assoc_change);
2722 		sac->sac_state = state;
2723 		sac->sac_error = error;
2724 		/* XXX verify these stream counts */
2725 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2726 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2727 		sac->sac_assoc_id = sctp_get_associd(stcb);
2728 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2729 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
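				/*
				 * Each supported feature is reported as a
				 * single byte appended to sac_info (cf.
				 * RFC 6458, Section 6.1.1).
				 */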
2730 				i = 0;
2731 				if (stcb->asoc.prsctp_supported == 1) {
2732 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2733 				}
2734 				if (stcb->asoc.auth_supported == 1) {
2735 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2736 				}
2737 				if (stcb->asoc.asconf_supported == 1) {
2738 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2739 				}
2740 				if (stcb->asoc.idata_supported == 1) {
2741 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2742 				}
2743 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2744 				if (stcb->asoc.reconfig_supported == 1) {
2745 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2746 				}
2747 				sac->sac_length += i;
2748 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2749 				memcpy(sac->sac_info, abort, abort_len);
2750 				sac->sac_length += abort_len;
2751 			}
2752 		}
2753 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2754 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2755 		    0, 0, stcb->asoc.context, 0, 0, 0,
2756 		    m_notify);
2757 		if (control != NULL) {
2758 			control->length = SCTP_BUF_LEN(m_notify);
2759 			/* not that we need this */
2760 			control->tail_mbuf = m_notify;
2761 			control->spec_flags = M_NOTIFICATION;
2762 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2763 			    control,
2764 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2765 			    so_locked);
2766 		} else {
2767 			sctp_m_freem(m_notify);
2768 		}
2769 	}
2770 	/*
2771 	 * For 1-to-1 style sockets, we send up an error when an ABORT
2772 	 * comes in.
2773 	 */
2774 set_error:
2775 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2776 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2777 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2778 		SOCK_LOCK(stcb->sctp_socket);
2779 		if (from_peer) {
2780 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2781 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2782 				stcb->sctp_socket->so_error = ECONNREFUSED;
2783 			} else {
2784 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2785 				stcb->sctp_socket->so_error = ECONNRESET;
2786 			}
2787 		} else {
2788 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2789 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2790 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2791 				stcb->sctp_socket->so_error = ETIMEDOUT;
2792 			} else {
2793 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2794 				stcb->sctp_socket->so_error = ECONNABORTED;
2795 			}
2796 		}
2797 	}
2798 	/* Wake ANY sleepers */
2799 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2800 	so = SCTP_INP_SO(stcb->sctp_ep);
2801 	if (!so_locked) {
2802 		atomic_add_int(&stcb->asoc.refcnt, 1);
2803 		SCTP_TCB_UNLOCK(stcb);
2804 		SCTP_SOCKET_LOCK(so, 1);
2805 		SCTP_TCB_LOCK(stcb);
2806 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2807 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2808 			SCTP_SOCKET_UNLOCK(so, 1);
2809 			return;
2810 		}
2811 	}
2812 #endif
2813 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2814 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2815 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2816 		socantrcvmore_locked(stcb->sctp_socket);
2817 	}
2818 	sorwakeup(stcb->sctp_socket);
2819 	sowwakeup(stcb->sctp_socket);
2820 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2821 	if (!so_locked) {
2822 		SCTP_SOCKET_UNLOCK(so, 1);
2823 	}
2824 #endif
2825 }
2826 
2827 static void
2828 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2829     struct sockaddr *sa, uint32_t error, int so_locked
2830 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2831     SCTP_UNUSED
2832 #endif
2833 )
2834 {
2835 	struct mbuf *m_notify;
2836 	struct sctp_paddr_change *spc;
2837 	struct sctp_queued_to_read *control;
2838 
2839 	if ((stcb == NULL) ||
2840 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2841 		/* event not enabled */
2842 		return;
2843 	}
2844 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2845 	if (m_notify == NULL)
2846 		return;
2847 	SCTP_BUF_LEN(m_notify) = 0;
2848 	spc = mtod(m_notify, struct sctp_paddr_change *);
2849 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2850 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2851 	spc->spc_flags = 0;
2852 	spc->spc_length = sizeof(struct sctp_paddr_change);
2853 	switch (sa->sa_family) {
2854 #ifdef INET
2855 	case AF_INET:
2856 #ifdef INET6
2857 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2858 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2859 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2860 		} else {
2861 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2862 		}
2863 #else
2864 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2865 #endif
2866 		break;
2867 #endif
2868 #ifdef INET6
2869 	case AF_INET6:
2870 		{
2871 			struct sockaddr_in6 *sin6;
2872 
2873 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2874 
2875 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2876 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2877 				if (sin6->sin6_scope_id == 0) {
2878 					/* recover scope_id for user */
2879 					(void)sa6_recoverscope(sin6);
2880 				} else {
2881 					/* clear embedded scope_id for user */
2882 					in6_clearscope(&sin6->sin6_addr);
2883 				}
2884 			}
2885 			break;
2886 		}
2887 #endif
2888 	default:
2889 		/* TSNH */
2890 		break;
2891 	}
2892 	spc->spc_state = state;
2893 	spc->spc_error = error;
2894 	spc->spc_assoc_id = sctp_get_associd(stcb);
2895 
2896 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2897 	SCTP_BUF_NEXT(m_notify) = NULL;
2898 
2899 	/* append to socket */
2900 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2901 	    0, 0, stcb->asoc.context, 0, 0, 0,
2902 	    m_notify);
2903 	if (control == NULL) {
2904 		/* no memory */
2905 		sctp_m_freem(m_notify);
2906 		return;
2907 	}
2908 	control->length = SCTP_BUF_LEN(m_notify);
2909 	control->spec_flags = M_NOTIFICATION;
2910 	/* not that we need this */
2911 	control->tail_mbuf = m_notify;
2912 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2913 	    control,
2914 	    &stcb->sctp_socket->so_rcv, 1,
2915 	    SCTP_READ_LOCK_NOT_HELD,
2916 	    so_locked);
2917 }
2918 
2919 
2920 static void
2921 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2922     struct sctp_tmit_chunk *chk, int so_locked
2923 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2924     SCTP_UNUSED
2925 #endif
2926 )
2927 {
2928 	struct mbuf *m_notify;
2929 	struct sctp_send_failed *ssf;
2930 	struct sctp_send_failed_event *ssfe;
2931 	struct sctp_queued_to_read *control;
2932 	int length;
2933 
2934 	if ((stcb == NULL) ||
2935 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2936 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2937 		/* event not enabled */
2938 		return;
2939 	}
2940 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2941 		length = sizeof(struct sctp_send_failed_event);
2942 	} else {
2943 		length = sizeof(struct sctp_send_failed);
2944 	}
2945 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
2946 	if (m_notify == NULL)
2947 		/* no space left */
2948 		return;
2949 	SCTP_BUF_LEN(m_notify) = 0;
2950 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2951 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2952 		memset(ssfe, 0, length);
2953 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2954 		if (sent) {
2955 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2956 		} else {
2957 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2958 		}
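		/*
		 * The reported length covers the notification header plus
		 * the user payload: chk->send_size still includes the DATA
		 * chunk header, which is subtracted here and trimmed off
		 * the mbuf chain below.
		 */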
2959 		length += chk->send_size;
2960 		length -= sizeof(struct sctp_data_chunk);
2961 		ssfe->ssfe_length = length;
2962 		ssfe->ssfe_error = error;
2963 		/* not exactly what the user sent in, but should be close :) */
2964 		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
2965 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2966 		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
2967 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2968 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2969 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2970 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
2971 	} else {
2972 		ssf = mtod(m_notify, struct sctp_send_failed *);
2973 		memset(ssf, 0, length);
2974 		ssf->ssf_type = SCTP_SEND_FAILED;
2975 		if (sent) {
2976 			ssf->ssf_flags = SCTP_DATA_SENT;
2977 		} else {
2978 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2979 		}
2980 		length += chk->send_size;
2981 		length -= sizeof(struct sctp_data_chunk);
2982 		ssf->ssf_length = length;
2983 		ssf->ssf_error = error;
2984 		/* not exactly what the user sent in, but should be close :) */
2985 		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
2986 		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2987 		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2988 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2989 		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2990 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
2991 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2992 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
2993 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2994 	}
2995 	if (chk->data) {
2996 		/*
2997 		 * trim off the sctp chunk header (it should be there)
2998 		 */
2999 		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3000 			m_adj(chk->data, sizeof(struct sctp_data_chunk));
3001 			sctp_mbuf_crush(chk->data);
3002 			chk->send_size -= sizeof(struct sctp_data_chunk);
3003 		}
3004 	}
3005 	SCTP_BUF_NEXT(m_notify) = chk->data;
3006 	/* Steal off the mbuf */
3007 	chk->data = NULL;
3008 	/*
3009 	 * For this case, we check the actual socket buffer, since the assoc
3010 	 * is going away: we don't want to overfill the socket buffer for a
3011 	 * non-reader.
3012 	 */
3013 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3014 		sctp_m_freem(m_notify);
3015 		return;
3016 	}
3017 	/* append to socket */
3018 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3019 	    0, 0, stcb->asoc.context, 0, 0, 0,
3020 	    m_notify);
3021 	if (control == NULL) {
3022 		/* no memory */
3023 		sctp_m_freem(m_notify);
3024 		return;
3025 	}
3026 	control->spec_flags = M_NOTIFICATION;
3027 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3028 	    control,
3029 	    &stcb->sctp_socket->so_rcv, 1,
3030 	    SCTP_READ_LOCK_NOT_HELD,
3031 	    so_locked);
3032 }
3033 
3034 
3035 static void
3036 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3037     struct sctp_stream_queue_pending *sp, int so_locked
3038 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3039     SCTP_UNUSED
3040 #endif
3041 )
3042 {
3043 	struct mbuf *m_notify;
3044 	struct sctp_send_failed *ssf;
3045 	struct sctp_send_failed_event *ssfe;
3046 	struct sctp_queued_to_read *control;
3047 	int length;
3048 
3049 	if ((stcb == NULL) ||
3050 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3051 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3052 		/* event not enabled */
3053 		return;
3054 	}
3055 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3056 		length = sizeof(struct sctp_send_failed_event);
3057 	} else {
3058 		length = sizeof(struct sctp_send_failed);
3059 	}
3060 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
3061 	if (m_notify == NULL) {
3062 		/* no space left */
3063 		return;
3064 	}
3065 	SCTP_BUF_LEN(m_notify) = 0;
3066 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3067 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3068 		memset(ssfe, 0, length);
3069 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3070 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3071 		length += sp->length;
3072 		ssfe->ssfe_length = length;
3073 		ssfe->ssfe_error = error;
3074 		/* not exactly what the user sent in, but should be close :) */
3075 		ssfe->ssfe_info.snd_sid = sp->stream;
3076 		if (sp->some_taken) {
3077 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3078 		} else {
3079 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3080 		}
3081 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3082 		ssfe->ssfe_info.snd_context = sp->context;
3083 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3084 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3085 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
3086 	} else {
3087 		ssf = mtod(m_notify, struct sctp_send_failed *);
3088 		memset(ssf, 0, length);
3089 		ssf->ssf_type = SCTP_SEND_FAILED;
3090 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3091 		length += sp->length;
3092 		ssf->ssf_length = length;
3093 		ssf->ssf_error = error;
3094 		/* not exactly what the user sent in, but should be close :) */
3095 		ssf->ssf_info.sinfo_stream = sp->stream;
3096 		ssf->ssf_info.sinfo_ssn = 0;
3097 		if (sp->some_taken) {
3098 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3099 		} else {
3100 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3101 		}
3102 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3103 		ssf->ssf_info.sinfo_context = sp->context;
3104 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3105 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3106 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3107 	}
3108 	SCTP_BUF_NEXT(m_notify) = sp->data;
3109 
3110 	/* Steal off the mbuf */
3111 	sp->data = NULL;
3112 	/*
3113 	 * For this case, we check the actual socket buffer, since the assoc
3114 	 * is going away: we don't want to overfill the socket buffer for a
3115 	 * non-reader.
3116 	 */
3117 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3118 		sctp_m_freem(m_notify);
3119 		return;
3120 	}
3121 	/* append to socket */
3122 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3123 	    0, 0, stcb->asoc.context, 0, 0, 0,
3124 	    m_notify);
3125 	if (control == NULL) {
3126 		/* no memory */
3127 		sctp_m_freem(m_notify);
3128 		return;
3129 	}
3130 	control->spec_flags = M_NOTIFICATION;
3131 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3132 	    control,
3133 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3134 }
3135 
3136 
3137 
3138 static void
3139 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3140 {
3141 	struct mbuf *m_notify;
3142 	struct sctp_adaptation_event *sai;
3143 	struct sctp_queued_to_read *control;
3144 
3145 	if ((stcb == NULL) ||
3146 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3147 		/* event not enabled */
3148 		return;
3149 	}
3150 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_NOWAIT, 1, MT_DATA);
3151 	if (m_notify == NULL)
3152 		/* no space left */
3153 		return;
3154 	SCTP_BUF_LEN(m_notify) = 0;
3155 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3156 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3157 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3158 	sai->sai_flags = 0;
3159 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3160 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3161 	sai->sai_assoc_id = sctp_get_associd(stcb);
3162 
3163 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3164 	SCTP_BUF_NEXT(m_notify) = NULL;
3165 
3166 	/* append to socket */
3167 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3168 	    0, 0, stcb->asoc.context, 0, 0, 0,
3169 	    m_notify);
3170 	if (control == NULL) {
3171 		/* no memory */
3172 		sctp_m_freem(m_notify);
3173 		return;
3174 	}
3175 	control->length = SCTP_BUF_LEN(m_notify);
3176 	control->spec_flags = M_NOTIFICATION;
3177 	/* not that we need this */
3178 	control->tail_mbuf = m_notify;
3179 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3180 	    control,
3181 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3182 }
3183 
3184 /* This always must be called with the read-queue LOCKED in the INP */
3185 static void
3186 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3187     uint32_t val, int so_locked
3188 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3189     SCTP_UNUSED
3190 #endif
3191 )
3192 {
3193 	struct mbuf *m_notify;
3194 	struct sctp_pdapi_event *pdapi;
3195 	struct sctp_queued_to_read *control;
3196 	struct sockbuf *sb;
3197 
3198 	if ((stcb == NULL) ||
3199 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3200 		/* event not enabled */
3201 		return;
3202 	}
3203 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3204 		return;
3205 	}
3206 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3207 	if (m_notify == NULL)
3208 		/* no space left */
3209 		return;
3210 	SCTP_BUF_LEN(m_notify) = 0;
3211 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3212 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3213 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3214 	pdapi->pdapi_flags = 0;
3215 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3216 	pdapi->pdapi_indication = error;
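	/*
	 * 'val' packs the stream id in the upper 16 bits and the stream
	 * sequence number in the lower 16 bits.
	 */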
3217 	pdapi->pdapi_stream = (val >> 16);
3218 	pdapi->pdapi_seq = (val & 0x0000ffff);
3219 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3220 
3221 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3222 	SCTP_BUF_NEXT(m_notify) = NULL;
3223 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3224 	    0, 0, stcb->asoc.context, 0, 0, 0,
3225 	    m_notify);
3226 	if (control == NULL) {
3227 		/* no memory */
3228 		sctp_m_freem(m_notify);
3229 		return;
3230 	}
3231 	control->spec_flags = M_NOTIFICATION;
3232 	control->length = SCTP_BUF_LEN(m_notify);
3233 	/* not that we need this */
3234 	control->tail_mbuf = m_notify;
3235 	control->held_length = 0;
3236 	control->length = 0;
3237 	sb = &stcb->sctp_socket->so_rcv;
3238 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3239 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3240 	}
3241 	sctp_sballoc(stcb, sb, m_notify);
3242 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3243 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3244 	}
3245 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3246 	control->end_added = 1;
3247 	if (stcb->asoc.control_pdapi)
3248 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3249 	else {
3250 		/* we really should not see this case */
3251 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3252 	}
3253 	if (stcb->sctp_ep && stcb->sctp_socket) {
3254 		/* This should always be the case */
3255 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3256 		struct socket *so;
3257 
3258 		so = SCTP_INP_SO(stcb->sctp_ep);
3259 		if (!so_locked) {
3260 			atomic_add_int(&stcb->asoc.refcnt, 1);
3261 			SCTP_TCB_UNLOCK(stcb);
3262 			SCTP_SOCKET_LOCK(so, 1);
3263 			SCTP_TCB_LOCK(stcb);
3264 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3265 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3266 				SCTP_SOCKET_UNLOCK(so, 1);
3267 				return;
3268 			}
3269 		}
3270 #endif
3271 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3272 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3273 		if (!so_locked) {
3274 			SCTP_SOCKET_UNLOCK(so, 1);
3275 		}
3276 #endif
3277 	}
3278 }
3279 
3280 static void
3281 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3282 {
3283 	struct mbuf *m_notify;
3284 	struct sctp_shutdown_event *sse;
3285 	struct sctp_queued_to_read *control;
3286 
3287 	/*
3288 	 * For TCP model AND UDP connected sockets we will send an error up
3289 	 * when a SHUTDOWN completes.
3290 	 */
3291 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3292 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3293 		/* mark socket closed for read/write and wakeup! */
3294 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3295 		struct socket *so;
3296 
3297 		so = SCTP_INP_SO(stcb->sctp_ep);
3298 		atomic_add_int(&stcb->asoc.refcnt, 1);
3299 		SCTP_TCB_UNLOCK(stcb);
3300 		SCTP_SOCKET_LOCK(so, 1);
3301 		SCTP_TCB_LOCK(stcb);
3302 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3303 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3304 			SCTP_SOCKET_UNLOCK(so, 1);
3305 			return;
3306 		}
3307 #endif
3308 		socantsendmore(stcb->sctp_socket);
3309 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3310 		SCTP_SOCKET_UNLOCK(so, 1);
3311 #endif
3312 	}
3313 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3314 		/* event not enabled */
3315 		return;
3316 	}
3317 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3318 	if (m_notify == NULL)
3319 		/* no space left */
3320 		return;
3321 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3322 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3323 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3324 	sse->sse_flags = 0;
3325 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3326 	sse->sse_assoc_id = sctp_get_associd(stcb);
3327 
3328 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3329 	SCTP_BUF_NEXT(m_notify) = NULL;
3330 
3331 	/* append to socket */
3332 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3333 	    0, 0, stcb->asoc.context, 0, 0, 0,
3334 	    m_notify);
3335 	if (control == NULL) {
3336 		/* no memory */
3337 		sctp_m_freem(m_notify);
3338 		return;
3339 	}
3340 	control->spec_flags = M_NOTIFICATION;
3341 	control->length = SCTP_BUF_LEN(m_notify);
3342 	/* not that we need this */
3343 	control->tail_mbuf = m_notify;
3344 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3345 	    control,
3346 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3347 }
3348 
3349 static void
3350 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3351     int so_locked
3352 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3353     SCTP_UNUSED
3354 #endif
3355 )
3356 {
3357 	struct mbuf *m_notify;
3358 	struct sctp_sender_dry_event *event;
3359 	struct sctp_queued_to_read *control;
3360 
3361 	if ((stcb == NULL) ||
3362 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3363 		/* event not enabled */
3364 		return;
3365 	}
3366 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3367 	if (m_notify == NULL) {
3368 		/* no space left */
3369 		return;
3370 	}
3371 	SCTP_BUF_LEN(m_notify) = 0;
3372 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3373 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3374 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3375 	event->sender_dry_flags = 0;
3376 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3377 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3378 
3379 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3380 	SCTP_BUF_NEXT(m_notify) = NULL;
3381 
3382 	/* append to socket */
3383 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3384 	    0, 0, stcb->asoc.context, 0, 0, 0,
3385 	    m_notify);
3386 	if (control == NULL) {
3387 		/* no memory */
3388 		sctp_m_freem(m_notify);
3389 		return;
3390 	}
3391 	control->length = SCTP_BUF_LEN(m_notify);
3392 	control->spec_flags = M_NOTIFICATION;
3393 	/* not that we need this */
3394 	control->tail_mbuf = m_notify;
3395 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3396 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3397 }
3398 
3399 
3400 void
3401 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3402 {
3403 	struct mbuf *m_notify;
3404 	struct sctp_queued_to_read *control;
3405 	struct sctp_stream_change_event *stradd;
3406 
3407 	if ((stcb == NULL) ||
3408 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3409 		/* event not enabled */
3410 		return;
3411 	}
3412 	if ((stcb->asoc.peer_req_out) && flag) {
3413 		/* Peer made the request, don't tell the local user */
3414 		stcb->asoc.peer_req_out = 0;
3415 		return;
3416 	}
3417 	stcb->asoc.peer_req_out = 0;
3418 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3419 	if (m_notify == NULL)
3420 		/* no space left */
3421 		return;
3422 	SCTP_BUF_LEN(m_notify) = 0;
3423 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3424 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3425 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3426 	stradd->strchange_flags = flag;
3427 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3428 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3429 	stradd->strchange_instrms = numberin;
3430 	stradd->strchange_outstrms = numberout;
3431 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3432 	SCTP_BUF_NEXT(m_notify) = NULL;
3433 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3434 		/* no space */
3435 		sctp_m_freem(m_notify);
3436 		return;
3437 	}
3438 	/* append to socket */
3439 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3440 	    0, 0, stcb->asoc.context, 0, 0, 0,
3441 	    m_notify);
3442 	if (control == NULL) {
3443 		/* no memory */
3444 		sctp_m_freem(m_notify);
3445 		return;
3446 	}
3447 	control->spec_flags = M_NOTIFICATION;
3448 	control->length = SCTP_BUF_LEN(m_notify);
3449 	/* not that we need this */
3450 	control->tail_mbuf = m_notify;
3451 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3452 	    control,
3453 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3454 }
3455 
3456 void
3457 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3458 {
3459 	struct mbuf *m_notify;
3460 	struct sctp_queued_to_read *control;
3461 	struct sctp_assoc_reset_event *strasoc;
3462 
3463 	if ((stcb == NULL) ||
3464 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3465 		/* event not enabled */
3466 		return;
3467 	}
3468 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3469 	if (m_notify == NULL)
3470 		/* no space left */
3471 		return;
3472 	SCTP_BUF_LEN(m_notify) = 0;
3473 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3474 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3475 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3476 	strasoc->assocreset_flags = flag;
3477 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3478 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3479 	strasoc->assocreset_local_tsn = sending_tsn;
3480 	strasoc->assocreset_remote_tsn = recv_tsn;
3481 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3482 	SCTP_BUF_NEXT(m_notify) = NULL;
3483 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3484 		/* no space */
3485 		sctp_m_freem(m_notify);
3486 		return;
3487 	}
3488 	/* append to socket */
3489 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3490 	    0, 0, stcb->asoc.context, 0, 0, 0,
3491 	    m_notify);
3492 	if (control == NULL) {
3493 		/* no memory */
3494 		sctp_m_freem(m_notify);
3495 		return;
3496 	}
3497 	control->spec_flags = M_NOTIFICATION;
3498 	control->length = SCTP_BUF_LEN(m_notify);
3499 	/* not that we need this */
3500 	control->tail_mbuf = m_notify;
3501 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3502 	    control,
3503 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3504 }
3505 
3506 
3507 
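/*
 * Build a SCTP_STREAM_RESET_EVENT notification listing the affected stream
 * ids (converted to host byte order) and append it to the socket's read
 * queue, provided the SCTP_PCB_FLAGS_STREAM_RESETEVNT feature is enabled.
 */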
3508 static void
3509 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3510     int number_entries, uint16_t * list, int flag)
3511 {
3512 	struct mbuf *m_notify;
3513 	struct sctp_queued_to_read *control;
3514 	struct sctp_stream_reset_event *strreset;
3515 	int len;
3516 
3517 	if ((stcb == NULL) ||
3518 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3519 		/* event not enabled */
3520 		return;
3521 	}
3522 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3523 	if (m_notify == NULL)
3524 		/* no space left */
3525 		return;
3526 	SCTP_BUF_LEN(m_notify) = 0;
3527 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3528 	if (len > M_TRAILINGSPACE(m_notify)) {
3529 		/* never enough room */
3530 		sctp_m_freem(m_notify);
3531 		return;
3532 	}
3533 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3534 	memset(strreset, 0, len);
3535 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3536 	strreset->strreset_flags = flag;
3537 	strreset->strreset_length = len;
3538 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3539 	if (number_entries) {
3540 		int i;
3541 
3542 		for (i = 0; i < number_entries; i++) {
3543 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3544 		}
3545 	}
3546 	SCTP_BUF_LEN(m_notify) = len;
3547 	SCTP_BUF_NEXT(m_notify) = NULL;
3548 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3549 		/* no space */
3550 		sctp_m_freem(m_notify);
3551 		return;
3552 	}
3553 	/* append to socket */
3554 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3555 	    0, 0, stcb->asoc.context, 0, 0, 0,
3556 	    m_notify);
3557 	if (control == NULL) {
3558 		/* no memory */
3559 		sctp_m_freem(m_notify);
3560 		return;
3561 	}
3562 	control->spec_flags = M_NOTIFICATION;
3563 	control->length = SCTP_BUF_LEN(m_notify);
3564 	/* not that we need this */
3565 	control->tail_mbuf = m_notify;
3566 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3567 	    control,
3568 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3569 }
3570 
3571 
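/*
 * Deliver a SCTP_REMOTE_ERROR notification to the user. The offending ERROR
 * chunk is copied into the event when an mbuf large enough for it can be
 * allocated; otherwise only the fixed part of the event is reported.
 */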
3572 static void
3573 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3574 {
3575 	struct mbuf *m_notify;
3576 	struct sctp_remote_error *sre;
3577 	struct sctp_queued_to_read *control;
3578 	unsigned int notif_len;
3579 	uint16_t chunk_len;
3580 
3581 	if ((stcb == NULL) ||
3582 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3583 		return;
3584 	}
3585 	if (chunk != NULL) {
3586 		chunk_len = ntohs(chunk->ch.chunk_length);
3587 	} else {
3588 		chunk_len = 0;
3589 	}
3590 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3591 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3592 	if (m_notify == NULL) {
3593 		/* Retry with smaller value. */
3594 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3595 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3596 		if (m_notify == NULL) {
3597 			return;
3598 		}
3599 	}
3600 	SCTP_BUF_NEXT(m_notify) = NULL;
3601 	sre = mtod(m_notify, struct sctp_remote_error *);
3602 	memset(sre, 0, notif_len);
3603 	sre->sre_type = SCTP_REMOTE_ERROR;
3604 	sre->sre_flags = 0;
3605 	sre->sre_length = sizeof(struct sctp_remote_error);
3606 	sre->sre_error = error;
3607 	sre->sre_assoc_id = sctp_get_associd(stcb);
3608 	if (notif_len > sizeof(struct sctp_remote_error)) {
3609 		memcpy(sre->sre_data, chunk, chunk_len);
3610 		sre->sre_length += chunk_len;
3611 	}
3612 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3613 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3614 	    0, 0, stcb->asoc.context, 0, 0, 0,
3615 	    m_notify);
3616 	if (control != NULL) {
3617 		control->length = SCTP_BUF_LEN(m_notify);
3618 		/* not that we need this */
3619 		control->tail_mbuf = m_notify;
3620 		control->spec_flags = M_NOTIFICATION;
3621 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3622 		    control,
3623 		    &stcb->sctp_socket->so_rcv, 1,
3624 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3625 	} else {
3626 		sctp_m_freem(m_notify);
3627 	}
3628 }
3629 
3630 
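/*
 * Central dispatcher for notifications to the ULP: maps a SCTP_NOTIFY_*
 * code to the specific notification routine above. Nothing is reported when
 * the socket is gone or can no longer receive, and interface/address events
 * are not reported while the association is still in the COOKIE-WAIT or
 * COOKIE-ECHOED state.
 */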
3631 void
3632 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3633     uint32_t error, void *data, int so_locked
3634 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3635     SCTP_UNUSED
3636 #endif
3637 )
3638 {
3639 	if ((stcb == NULL) ||
3640 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3641 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3642 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3643 		/* If the socket is gone we are out of here */
3644 		return;
3645 	}
3646 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3647 		return;
3648 	}
3649 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3650 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3651 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3652 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3653 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3654 			/* Don't report these in front states */
3655 			return;
3656 		}
3657 	}
3658 	switch (notification) {
3659 	case SCTP_NOTIFY_ASSOC_UP:
3660 		if (stcb->asoc.assoc_up_sent == 0) {
3661 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3662 			stcb->asoc.assoc_up_sent = 1;
3663 		}
3664 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3665 			sctp_notify_adaptation_layer(stcb);
3666 		}
3667 		if (stcb->asoc.auth_supported == 0) {
3668 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3669 			    NULL, so_locked);
3670 		}
3671 		break;
3672 	case SCTP_NOTIFY_ASSOC_DOWN:
3673 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3674 		break;
3675 	case SCTP_NOTIFY_INTERFACE_DOWN:
3676 		{
3677 			struct sctp_nets *net;
3678 
3679 			net = (struct sctp_nets *)data;
3680 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3681 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3682 			break;
3683 		}
3684 	case SCTP_NOTIFY_INTERFACE_UP:
3685 		{
3686 			struct sctp_nets *net;
3687 
3688 			net = (struct sctp_nets *)data;
3689 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3690 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3691 			break;
3692 		}
3693 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3694 		{
3695 			struct sctp_nets *net;
3696 
3697 			net = (struct sctp_nets *)data;
3698 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3699 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3700 			break;
3701 		}
3702 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3703 		sctp_notify_send_failed2(stcb, error,
3704 		    (struct sctp_stream_queue_pending *)data, so_locked);
3705 		break;
3706 	case SCTP_NOTIFY_SENT_DG_FAIL:
3707 		sctp_notify_send_failed(stcb, 1, error,
3708 		    (struct sctp_tmit_chunk *)data, so_locked);
3709 		break;
3710 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3711 		sctp_notify_send_failed(stcb, 0, error,
3712 		    (struct sctp_tmit_chunk *)data, so_locked);
3713 		break;
3714 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3715 		{
3716 			uint32_t val;
3717 
3718 			val = *((uint32_t *) data);
3719 
3720 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3721 			break;
3722 		}
3723 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3724 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3725 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3726 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3727 		} else {
3728 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3729 		}
3730 		break;
3731 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3732 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3733 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3734 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3735 		} else {
3736 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3737 		}
3738 		break;
3739 	case SCTP_NOTIFY_ASSOC_RESTART:
3740 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3741 		if (stcb->asoc.auth_supported == 0) {
3742 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3743 			    NULL, so_locked);
3744 		}
3745 		break;
3746 	case SCTP_NOTIFY_STR_RESET_SEND:
3747 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3748 		break;
3749 	case SCTP_NOTIFY_STR_RESET_RECV:
3750 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3751 		break;
3752 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3753 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3754 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3755 		break;
3756 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3757 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3758 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3759 		break;
3760 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3761 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3762 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3763 		break;
3764 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3765 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3766 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3767 		break;
3768 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3769 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3770 		    error, so_locked);
3771 		break;
3772 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3773 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3774 		    error, so_locked);
3775 		break;
3776 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3777 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3778 		    error, so_locked);
3779 		break;
3780 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3781 		sctp_notify_shutdown_event(stcb);
3782 		break;
3783 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3784 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3785 		    (uint16_t) (uintptr_t) data,
3786 		    so_locked);
3787 		break;
3788 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3789 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3790 		    (uint16_t) (uintptr_t) data,
3791 		    so_locked);
3792 		break;
3793 	case SCTP_NOTIFY_NO_PEER_AUTH:
3794 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3795 		    (uint16_t) (uintptr_t) data,
3796 		    so_locked);
3797 		break;
3798 	case SCTP_NOTIFY_SENDER_DRY:
3799 		sctp_notify_sender_dry_event(stcb, so_locked);
3800 		break;
3801 	case SCTP_NOTIFY_REMOTE_ERROR:
3802 		sctp_notify_remote_error(stcb, error, data);
3803 		break;
3804 	default:
3805 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3806 		    __func__, notification, notification);
3807 		break;
3808 	}			/* end switch */
3809 }
3810 
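/*
 * Report every chunk on the sent, send, and per-stream output queues as
 * failed (sent or unsent) and free it. Used when the association is being
 * torn down so the user learns which data was never delivered.
 */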
3811 void
3812 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3813 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3814     SCTP_UNUSED
3815 #endif
3816 )
3817 {
3818 	struct sctp_association *asoc;
3819 	struct sctp_stream_out *outs;
3820 	struct sctp_tmit_chunk *chk, *nchk;
3821 	struct sctp_stream_queue_pending *sp, *nsp;
3822 	int i;
3823 
3824 	if (stcb == NULL) {
3825 		return;
3826 	}
3827 	asoc = &stcb->asoc;
3828 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3829 		/* already being freed */
3830 		return;
3831 	}
3832 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3833 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3834 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3835 		return;
3836 	}
3837 	/* now go through all the gunk, freeing chunks */
3838 	if (holds_lock == 0) {
3839 		SCTP_TCB_SEND_LOCK(stcb);
3840 	}
3841 	/* sent queue SHOULD be empty */
3842 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3843 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3844 		asoc->sent_queue_cnt--;
3845 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3846 			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3847 				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3848 #ifdef INVARIANTS
3849 			} else {
3850 				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3851 #endif
3852 			}
3853 		}
3854 		if (chk->data != NULL) {
3855 			sctp_free_bufspace(stcb, asoc, chk, 1);
3856 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3857 			    error, chk, so_locked);
3858 			if (chk->data) {
3859 				sctp_m_freem(chk->data);
3860 				chk->data = NULL;
3861 			}
3862 		}
3863 		sctp_free_a_chunk(stcb, chk, so_locked);
3864 		/* sa_ignore FREED_MEMORY */
3865 	}
3866 	/* pending send queue SHOULD be empty */
3867 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3868 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3869 		asoc->send_queue_cnt--;
3870 		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3871 			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3872 #ifdef INVARIANTS
3873 		} else {
3874 			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3875 #endif
3876 		}
3877 		if (chk->data != NULL) {
3878 			sctp_free_bufspace(stcb, asoc, chk, 1);
3879 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3880 			    error, chk, so_locked);
3881 			if (chk->data) {
3882 				sctp_m_freem(chk->data);
3883 				chk->data = NULL;
3884 			}
3885 		}
3886 		sctp_free_a_chunk(stcb, chk, so_locked);
3887 		/* sa_ignore FREED_MEMORY */
3888 	}
3889 	for (i = 0; i < asoc->streamoutcnt; i++) {
3890 		/* For each stream */
3891 		outs = &asoc->strmout[i];
3892 		/* clean up any sends there */
3893 		asoc->locked_on_sending = NULL;
3894 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3895 			asoc->stream_queue_cnt--;
3896 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3897 			sctp_free_spbufspace(stcb, asoc, sp);
3898 			if (sp->data) {
3899 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3900 				    error, (void *)sp, so_locked);
3901 				if (sp->data) {
3902 					sctp_m_freem(sp->data);
3903 					sp->data = NULL;
3904 					sp->tail_mbuf = NULL;
3905 					sp->length = 0;
3906 				}
3907 			}
3908 			if (sp->net) {
3909 				sctp_free_remote_addr(sp->net);
3910 				sp->net = NULL;
3911 			}
3912 			/* Free the chunk */
3913 			sctp_free_a_strmoq(stcb, sp, so_locked);
3914 			/* sa_ignore FREED_MEMORY */
3915 		}
3916 	}
3917 
3918 	if (holds_lock == 0) {
3919 		SCTP_TCB_SEND_UNLOCK(stcb);
3920 	}
3921 }
3922 
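/*
 * Tell the user the association was aborted: mark TCP-style sockets as
 * having been aborted, report all outbound data as failed, and issue a
 * locally- or remotely-aborted association change notification.
 */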
3923 void
3924 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3925     struct sctp_abort_chunk *abort, int so_locked
3926 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3927     SCTP_UNUSED
3928 #endif
3929 )
3930 {
3931 	if (stcb == NULL) {
3932 		return;
3933 	}
3934 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3935 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3936 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3937 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3938 	}
3939 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3940 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3941 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3942 		return;
3943 	}
3944 	/* Tell them we lost the asoc */
3945 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3946 	if (from_peer) {
3947 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3948 	} else {
3949 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3950 	}
3951 }
3952 
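/*
 * Abort an association in response to an incoming packet: notify the user
 * (if a TCB exists), send an ABORT back to the peer using the peer's vtag,
 * and free the association.
 */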
3953 void
3954 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3955     struct mbuf *m, int iphlen,
3956     struct sockaddr *src, struct sockaddr *dst,
3957     struct sctphdr *sh, struct mbuf *op_err,
3958     uint8_t mflowtype, uint32_t mflowid,
3959     uint32_t vrf_id, uint16_t port)
3960 {
3961 	uint32_t vtag;
3962 
3963 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3964 	struct socket *so;
3965 
3966 #endif
3967 
3968 	vtag = 0;
3969 	if (stcb != NULL) {
3970 		/* We have a TCB to abort, send notification too */
3971 		vtag = stcb->asoc.peer_vtag;
3972 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
3973 		/* get the assoc vrf id and table id */
3974 		vrf_id = stcb->asoc.vrf_id;
3975 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3976 	}
3977 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
3978 	    mflowtype, mflowid, inp->fibnum,
3979 	    vrf_id, port);
3980 	if (stcb != NULL) {
3981 		/* Ok, now lets free it */
3982 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3983 		so = SCTP_INP_SO(inp);
3984 		atomic_add_int(&stcb->asoc.refcnt, 1);
3985 		SCTP_TCB_UNLOCK(stcb);
3986 		SCTP_SOCKET_LOCK(so, 1);
3987 		SCTP_TCB_LOCK(stcb);
3988 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3989 #endif
3990 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3991 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3992 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3993 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3994 		}
3995 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
3996 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
3997 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3998 		SCTP_SOCKET_UNLOCK(so, 1);
3999 #endif
4000 	}
4001 }
4002 
4003 #ifdef SCTP_ASOCLOG_OF_TSNS
4004 void
4005 sctp_print_out_track_log(struct sctp_tcb *stcb)
4006 {
4007 #ifdef NOSIY_PRINTS
4008 	int i;
4009 
4010 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4011 	SCTP_PRINTF("IN bound TSN log-aaa\n");
4012 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4013 		SCTP_PRINTF("None rcvd\n");
4014 		goto none_in;
4015 	}
4016 	if (stcb->asoc.tsn_in_wrapped) {
4017 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4018 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4019 			    stcb->asoc.in_tsnlog[i].tsn,
4020 			    stcb->asoc.in_tsnlog[i].strm,
4021 			    stcb->asoc.in_tsnlog[i].seq,
4022 			    stcb->asoc.in_tsnlog[i].flgs,
4023 			    stcb->asoc.in_tsnlog[i].sz);
4024 		}
4025 	}
4026 	if (stcb->asoc.tsn_in_at) {
4027 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4028 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4029 			    stcb->asoc.in_tsnlog[i].tsn,
4030 			    stcb->asoc.in_tsnlog[i].strm,
4031 			    stcb->asoc.in_tsnlog[i].seq,
4032 			    stcb->asoc.in_tsnlog[i].flgs,
4033 			    stcb->asoc.in_tsnlog[i].sz);
4034 		}
4035 	}
4036 none_in:
4037 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4038 	if ((stcb->asoc.tsn_out_at == 0) &&
4039 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4040 		SCTP_PRINTF("None sent\n");
4041 	}
4042 	if (stcb->asoc.tsn_out_wrapped) {
4043 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4044 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4045 			    stcb->asoc.out_tsnlog[i].tsn,
4046 			    stcb->asoc.out_tsnlog[i].strm,
4047 			    stcb->asoc.out_tsnlog[i].seq,
4048 			    stcb->asoc.out_tsnlog[i].flgs,
4049 			    stcb->asoc.out_tsnlog[i].sz);
4050 		}
4051 	}
4052 	if (stcb->asoc.tsn_out_at) {
4053 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4054 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4055 			    stcb->asoc.out_tsnlog[i].tsn,
4056 			    stcb->asoc.out_tsnlog[i].strm,
4057 			    stcb->asoc.out_tsnlog[i].seq,
4058 			    stcb->asoc.out_tsnlog[i].flgs,
4059 			    stcb->asoc.out_tsnlog[i].sz);
4060 		}
4061 	}
4062 #endif
4063 }
4064 
4065 #endif
4066 
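/*
 * Abort an existing association from our side: notify the user (unless the
 * socket is already gone), send an ABORT chunk with the supplied cause to
 * the peer, update the statistics, and free the association.
 */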
4067 void
4068 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4069     struct mbuf *op_err,
4070     int so_locked
4071 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4072     SCTP_UNUSED
4073 #endif
4074 )
4075 {
4076 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4077 	struct socket *so;
4078 
4079 #endif
4080 
4081 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4082 	so = SCTP_INP_SO(inp);
4083 #endif
4084 	if (stcb == NULL) {
4085 		/* Got to have a TCB */
4086 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4087 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4088 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4089 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4090 			}
4091 		}
4092 		return;
4093 	} else {
4094 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4095 	}
4096 	/* notify the ulp */
4097 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4098 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4099 	}
4100 	/* notify the peer */
4101 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4102 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4103 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4104 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4105 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4106 	}
4107 	/* now free the asoc */
4108 #ifdef SCTP_ASOCLOG_OF_TSNS
4109 	sctp_print_out_track_log(stcb);
4110 #endif
4111 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4112 	if (!so_locked) {
4113 		atomic_add_int(&stcb->asoc.refcnt, 1);
4114 		SCTP_TCB_UNLOCK(stcb);
4115 		SCTP_SOCKET_LOCK(so, 1);
4116 		SCTP_TCB_LOCK(stcb);
4117 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4118 	}
4119 #endif
4120 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4121 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4122 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4123 	if (!so_locked) {
4124 		SCTP_SOCKET_UNLOCK(so, 1);
4125 	}
4126 #endif
4127 }
4128 
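/*
 * Handle an "out of the blue" packet, i.e. one that matches no existing
 * association. Walk its chunks: some chunk types must not be answered at
 * all, a SHUTDOWN-ACK is answered with a SHUTDOWN-COMPLETE, and everything
 * else gets an ABORT, subject to the sctp_blackhole sysctl.
 */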
4129 void
4130 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4131     struct sockaddr *src, struct sockaddr *dst,
4132     struct sctphdr *sh, struct sctp_inpcb *inp,
4133     struct mbuf *cause,
4134     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4135     uint32_t vrf_id, uint16_t port)
4136 {
4137 	struct sctp_chunkhdr *ch, chunk_buf;
4138 	unsigned int chk_length;
4139 	int contains_init_chunk;
4140 
4141 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4142 	/* Generate a TO address for future reference */
4143 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4144 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4145 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4146 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4147 		}
4148 	}
4149 	contains_init_chunk = 0;
4150 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4151 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4152 	while (ch != NULL) {
4153 		chk_length = ntohs(ch->chunk_length);
4154 		if (chk_length < sizeof(*ch)) {
4155 			/* break to abort land */
4156 			break;
4157 		}
4158 		switch (ch->chunk_type) {
4159 		case SCTP_INIT:
4160 			contains_init_chunk = 1;
4161 			break;
4162 		case SCTP_PACKET_DROPPED:
4163 			/* we don't respond to pkt-dropped */
4164 			return;
4165 		case SCTP_ABORT_ASSOCIATION:
4166 			/* we don't respond with an ABORT to an ABORT */
4167 			return;
4168 		case SCTP_SHUTDOWN_COMPLETE:
4169 			/*
4170 			 * we ignore it since we are not waiting for it and
4171 			 * peer is gone
4172 			 */
4173 			return;
4174 		case SCTP_SHUTDOWN_ACK:
4175 			sctp_send_shutdown_complete2(src, dst, sh,
4176 			    mflowtype, mflowid, fibnum,
4177 			    vrf_id, port);
4178 			return;
4179 		default:
4180 			break;
4181 		}
4182 		offset += SCTP_SIZE32(chk_length);
4183 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4184 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4185 	}
4186 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4187 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4188 	    (contains_init_chunk == 0))) {
4189 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4190 		    mflowtype, mflowid, fibnum,
4191 		    vrf_id, port);
4192 	}
4193 }
4194 
4195 /*
4196  * check the inbound datagram to make sure there is not an abort inside it,
4197  * if there is return 1, else return 0.
4198  */
4199 int
4200 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4201 {
4202 	struct sctp_chunkhdr *ch;
4203 	struct sctp_init_chunk *init_chk, chunk_buf;
4204 	int offset;
4205 	unsigned int chk_length;
4206 
4207 	offset = iphlen + sizeof(struct sctphdr);
4208 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4209 	    (uint8_t *) & chunk_buf);
4210 	while (ch != NULL) {
4211 		chk_length = ntohs(ch->chunk_length);
4212 		if (chk_length < sizeof(*ch)) {
4213 			/* packet is probably corrupt */
4214 			break;
4215 		}
4216 		/* we seem to be ok, is it an abort? */
4217 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4218 			/* yep, tell them */
4219 			return (1);
4220 		}
4221 		if (ch->chunk_type == SCTP_INITIATION) {
4222 			/* need to update the Vtag */
4223 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4224 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4225 			if (init_chk != NULL) {
4226 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4227 			}
4228 		}
4229 		/* Nope, move to the next chunk */
4230 		offset += SCTP_SIZE32(chk_length);
4231 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4232 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4233 	}
4234 	return (0);
4235 }
4236 
4237 /*
4238  * currently (2/02), ifa_addr embeds scope_id's and doesn't have sin6_scope_id
4239  * set (i.e. it's 0), so create this function to compare link-local scopes
4240  */
4241 #ifdef INET6
4242 uint32_t
4243 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4244 {
4245 	struct sockaddr_in6 a, b;
4246 
4247 	/* save copies */
4248 	a = *addr1;
4249 	b = *addr2;
4250 
4251 	if (a.sin6_scope_id == 0)
4252 		if (sa6_recoverscope(&a)) {
4253 			/* can't get scope, so can't match */
4254 			return (0);
4255 		}
4256 	if (b.sin6_scope_id == 0)
4257 		if (sa6_recoverscope(&b)) {
4258 			/* can't get scope, so can't match */
4259 			return (0);
4260 		}
4261 	if (a.sin6_scope_id != b.sin6_scope_id)
4262 		return (0);
4263 
4264 	return (1);
4265 }
4266 
4267 /*
4268  * returns a sockaddr_in6 with embedded scope recovered and removed
4269  */
4270 struct sockaddr_in6 *
4271 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4272 {
4273 	/* check and strip embedded scope junk */
4274 	if (addr->sin6_family == AF_INET6) {
4275 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4276 			if (addr->sin6_scope_id == 0) {
4277 				*store = *addr;
4278 				if (!sa6_recoverscope(store)) {
4279 					/* use the recovered scope */
4280 					addr = store;
4281 				}
4282 			} else {
4283 				/* else, return the original "to" addr */
4284 				in6_clearscope(&addr->sin6_addr);
4285 			}
4286 		}
4287 	}
4288 	return (addr);
4289 }
4290 
4291 #endif
4292 
4293 /*
4294  * Are the two addresses the same? Currently a "scopeless" check. Returns 1
4295  * if same, 0 if not.
4296  */
4297 int
4298 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4299 {
4300 
4301 	/* must be valid */
4302 	if (sa1 == NULL || sa2 == NULL)
4303 		return (0);
4304 
4305 	/* must be the same family */
4306 	if (sa1->sa_family != sa2->sa_family)
4307 		return (0);
4308 
4309 	switch (sa1->sa_family) {
4310 #ifdef INET6
4311 	case AF_INET6:
4312 		{
4313 			/* IPv6 addresses */
4314 			struct sockaddr_in6 *sin6_1, *sin6_2;
4315 
4316 			sin6_1 = (struct sockaddr_in6 *)sa1;
4317 			sin6_2 = (struct sockaddr_in6 *)sa2;
4318 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4319 			    sin6_2));
4320 		}
4321 #endif
4322 #ifdef INET
4323 	case AF_INET:
4324 		{
4325 			/* IPv4 addresses */
4326 			struct sockaddr_in *sin_1, *sin_2;
4327 
4328 			sin_1 = (struct sockaddr_in *)sa1;
4329 			sin_2 = (struct sockaddr_in *)sa2;
4330 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4331 		}
4332 #endif
4333 	default:
4334 		/* we don't do these... */
4335 		return (0);
4336 	}
4337 }
4338 
4339 void
4340 sctp_print_address(struct sockaddr *sa)
4341 {
4342 #ifdef INET6
4343 	char ip6buf[INET6_ADDRSTRLEN];
4344 
4345 #endif
4346 
4347 	switch (sa->sa_family) {
4348 #ifdef INET6
4349 	case AF_INET6:
4350 		{
4351 			struct sockaddr_in6 *sin6;
4352 
4353 			sin6 = (struct sockaddr_in6 *)sa;
4354 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4355 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4356 			    ntohs(sin6->sin6_port),
4357 			    sin6->sin6_scope_id);
4358 			break;
4359 		}
4360 #endif
4361 #ifdef INET
4362 	case AF_INET:
4363 		{
4364 			struct sockaddr_in *sin;
4365 			unsigned char *p;
4366 
4367 			sin = (struct sockaddr_in *)sa;
4368 			p = (unsigned char *)&sin->sin_addr;
4369 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4370 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4371 			break;
4372 		}
4373 #endif
4374 	default:
4375 		SCTP_PRINTF("?\n");
4376 		break;
4377 	}
4378 }
4379 
4380 void
4381 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4382     struct sctp_inpcb *new_inp,
4383     struct sctp_tcb *stcb,
4384     int waitflags)
4385 {
4386 	/*
4387 	 * go through our old INP and pull off any control structures that
4388 	 * belong to stcb and move them to the new inp.
4389 	 */
4390 	struct socket *old_so, *new_so;
4391 	struct sctp_queued_to_read *control, *nctl;
4392 	struct sctp_readhead tmp_queue;
4393 	struct mbuf *m;
4394 	int error = 0;
4395 
4396 	old_so = old_inp->sctp_socket;
4397 	new_so = new_inp->sctp_socket;
4398 	TAILQ_INIT(&tmp_queue);
4399 	error = sblock(&old_so->so_rcv, waitflags);
4400 	if (error) {
4401 		/*
4402 		 * Gak, can't get sblock, we have a problem. Data will be
4403 		 * left stranded.. and we don't dare look at it since the
4404 		 * other thread may be reading something. Oh well, it's a
4405 		 * screwed up app that does a peeloff OR an accept while
4406 		 * reading from the main socket... actually it's only the
4407 		 * peeloff() case, since I think read will fail on a
4408 		 * listening socket..
4409 		 */
4410 		return;
4411 	}
4412 	/* lock the socket buffers */
4413 	SCTP_INP_READ_LOCK(old_inp);
4414 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4415 		/* Pull off all for our target stcb */
4416 		if (control->stcb == stcb) {
4417 			/* remove it; we want it */
4418 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4419 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4420 			m = control->data;
4421 			while (m) {
4422 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4423 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4424 				}
4425 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4426 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4427 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4428 				}
4429 				m = SCTP_BUF_NEXT(m);
4430 			}
4431 		}
4432 	}
4433 	SCTP_INP_READ_UNLOCK(old_inp);
4434 	/* Remove the sb-lock on the old socket */
4435 
4436 	sbunlock(&old_so->so_rcv);
4437 	/* Now we move them over to the new socket buffer */
4438 	SCTP_INP_READ_LOCK(new_inp);
4439 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4440 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4441 		m = control->data;
4442 		while (m) {
4443 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4444 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4445 			}
4446 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4447 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4448 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4449 			}
4450 			m = SCTP_BUF_NEXT(m);
4451 		}
4452 	}
4453 	SCTP_INP_READ_UNLOCK(new_inp);
4454 }
4455 
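/*
 * Wake up a reader sleeping on the socket's receive buffer, either via the
 * zero-copy event mechanism when that feature is active or via a normal
 * receive wakeup, taking the socket lock first on platforms that need it.
 */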
4456 void
4457 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4458     struct sctp_tcb *stcb,
4459     int so_locked
4460 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4461     SCTP_UNUSED
4462 #endif
4463 )
4464 {
4465 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4466 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4467 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4468 		} else {
4469 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4470 			struct socket *so;
4471 
4472 			so = SCTP_INP_SO(inp);
4473 			if (!so_locked) {
4474 				if (stcb) {
4475 					atomic_add_int(&stcb->asoc.refcnt, 1);
4476 					SCTP_TCB_UNLOCK(stcb);
4477 				}
4478 				SCTP_SOCKET_LOCK(so, 1);
4479 				if (stcb) {
4480 					SCTP_TCB_LOCK(stcb);
4481 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4482 				}
4483 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4484 					SCTP_SOCKET_UNLOCK(so, 1);
4485 					return;
4486 				}
4487 			}
4488 #endif
4489 			sctp_sorwakeup(inp, inp->sctp_socket);
4490 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4491 			if (!so_locked) {
4492 				SCTP_SOCKET_UNLOCK(so, 1);
4493 			}
4494 #endif
4495 		}
4496 	}
4497 }
4498 
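/*
 * Append a fully built read-queue entry to the endpoint's read queue:
 * account each mbuf against the socket buffer, prune zero-length mbufs,
 * mark the entry complete when 'end' is set, and wake up the reader.
 */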
4499 void
4500 sctp_add_to_readq(struct sctp_inpcb *inp,
4501     struct sctp_tcb *stcb,
4502     struct sctp_queued_to_read *control,
4503     struct sockbuf *sb,
4504     int end,
4505     int inp_read_lock_held,
4506     int so_locked
4507 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4508     SCTP_UNUSED
4509 #endif
4510 )
4511 {
4512 	/*
4513 	 * Here we must place the control on the end of the socket read
4514 	 * queue AND increment sb_cc so that select will work properly on
4515 	 * read.
4516 	 */
4517 	struct mbuf *m, *prev = NULL;
4518 
4519 	if (inp == NULL) {
4520 		/* Gak, TSNH!! */
4521 #ifdef INVARIANTS
4522 		panic("Gak, inp NULL on add_to_readq");
4523 #endif
4524 		return;
4525 	}
4526 	if (inp_read_lock_held == 0)
4527 		SCTP_INP_READ_LOCK(inp);
4528 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4529 		sctp_free_remote_addr(control->whoFrom);
4530 		if (control->data) {
4531 			sctp_m_freem(control->data);
4532 			control->data = NULL;
4533 		}
4534 		sctp_free_a_readq(stcb, control);
4535 		if (inp_read_lock_held == 0)
4536 			SCTP_INP_READ_UNLOCK(inp);
4537 		return;
4538 	}
4539 	if (!(control->spec_flags & M_NOTIFICATION)) {
4540 		atomic_add_int(&inp->total_recvs, 1);
4541 		if (!control->do_not_ref_stcb) {
4542 			atomic_add_int(&stcb->total_recvs, 1);
4543 		}
4544 	}
4545 	m = control->data;
4546 	control->held_length = 0;
4547 	control->length = 0;
4548 	while (m) {
4549 		if (SCTP_BUF_LEN(m) == 0) {
4550 			/* Skip mbufs with NO length */
4551 			if (prev == NULL) {
4552 				/* First one */
4553 				control->data = sctp_m_free(m);
4554 				m = control->data;
4555 			} else {
4556 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4557 				m = SCTP_BUF_NEXT(prev);
4558 			}
4559 			if (m == NULL) {
4560 				control->tail_mbuf = prev;
4561 			}
4562 			continue;
4563 		}
4564 		prev = m;
4565 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4566 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4567 		}
4568 		sctp_sballoc(stcb, sb, m);
4569 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4570 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4571 		}
4572 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4573 		m = SCTP_BUF_NEXT(m);
4574 	}
4575 	if (prev != NULL) {
4576 		control->tail_mbuf = prev;
4577 	} else {
4578 		/* Everything got collapsed out?? */
4579 		sctp_free_remote_addr(control->whoFrom);
4580 		sctp_free_a_readq(stcb, control);
4581 		if (inp_read_lock_held == 0)
4582 			SCTP_INP_READ_UNLOCK(inp);
4583 		return;
4584 	}
4585 	if (end) {
4586 		control->end_added = 1;
4587 	}
4588 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4589 	control->on_read_q = 1;
4590 	if (inp_read_lock_held == 0)
4591 		SCTP_INP_READ_UNLOCK(inp);
4592 	if (inp && inp->sctp_socket) {
4593 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
4594 	}
4595 }
4596 
4597 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4598  *************ALTERNATE ROUTING CODE
4599  */
4600 
4601 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4602  *************ALTERNATE ROUTING CODE
4603  */
4604 
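/*
 * Build an mbuf holding a generic SCTP error cause: a parameter header with
 * the given cause code and length followed by the info string (without a
 * terminating NUL). Returns NULL if code is 0, info is NULL, info is too
 * long, or no mbuf can be allocated.
 */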
4605 struct mbuf *
4606 sctp_generate_cause(uint16_t code, char *info)
4607 {
4608 	struct mbuf *m;
4609 	struct sctp_gen_error_cause *cause;
4610 	size_t info_len;
4611 	uint16_t len;
4612 
4613 	if ((code == 0) || (info == NULL)) {
4614 		return (NULL);
4615 	}
4616 	info_len = strlen(info);
4617 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4618 		return (NULL);
4619 	}
4620 	len = (uint16_t) (sizeof(struct sctp_paramhdr) + info_len);
4621 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4622 	if (m != NULL) {
4623 		SCTP_BUF_LEN(m) = len;
4624 		cause = mtod(m, struct sctp_gen_error_cause *);
4625 		cause->code = htons(code);
4626 		cause->length = htons(len);
4627 		memcpy(cause->info, info, info_len);
4628 	}
4629 	return (m);
4630 }
4631 
4632 struct mbuf *
4633 sctp_generate_no_user_data_cause(uint32_t tsn)
4634 {
4635 	struct mbuf *m;
4636 	struct sctp_error_no_user_data *no_user_data_cause;
4637 	uint16_t len;
4638 
4639 	len = (uint16_t) sizeof(struct sctp_error_no_user_data);
4640 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4641 	if (m != NULL) {
4642 		SCTP_BUF_LEN(m) = len;
4643 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4644 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4645 		no_user_data_cause->cause.length = htons(len);
4646 		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4647 	}
4648 	return (m);
4649 }
4650 
4651 #ifdef SCTP_MBCNT_LOGGING
4652 void
4653 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4654     struct sctp_tmit_chunk *tp1, int chk_cnt)
4655 {
4656 	if (tp1->data == NULL) {
4657 		return;
4658 	}
4659 	asoc->chunks_on_out_queue -= chk_cnt;
4660 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4661 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4662 		    asoc->total_output_queue_size,
4663 		    tp1->book_size,
4664 		    0,
4665 		    tp1->mbcnt);
4666 	}
4667 	if (asoc->total_output_queue_size >= tp1->book_size) {
4668 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4669 	} else {
4670 		asoc->total_output_queue_size = 0;
4671 	}
4672 
4673 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4674 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4675 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4676 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4677 		} else {
4678 			stcb->sctp_socket->so_snd.sb_cc = 0;
4679 
4680 		}
4681 	}
4682 }
4683 
4684 #endif
4685 
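/*
 * Abandon a PR-SCTP message starting at tp1: mark every fragment of the
 * message as SCTP_FORWARD_TSN_SKIP across the sent, send, and stream-out
 * queues, notify the user of the failed data, credit the freed space back
 * to the peer's rwnd, and return the number of bytes released.
 */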
4686 int
4687 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4688     uint8_t sent, int so_locked
4689 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4690     SCTP_UNUSED
4691 #endif
4692 )
4693 {
4694 	struct sctp_stream_out *strq;
4695 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4696 	struct sctp_stream_queue_pending *sp;
4697 	uint16_t stream = 0, seq = 0;
4698 	uint8_t foundeom = 0;
4699 	int ret_sz = 0;
4700 	int notdone;
4701 	int do_wakeup_routine = 0;
4702 
4703 	stream = tp1->rec.data.stream_number;
4704 	seq = tp1->rec.data.stream_seq;
4705 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4706 		stcb->asoc.abandoned_sent[0]++;
4707 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4708 		stcb->asoc.strmout[stream].abandoned_sent[0]++;
4709 #if defined(SCTP_DETAILED_STR_STATS)
4710 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4711 #endif
4712 	} else {
4713 		stcb->asoc.abandoned_unsent[0]++;
4714 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4715 		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
4716 #if defined(SCTP_DETAILED_STR_STATS)
4717 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4718 #endif
4719 	}
4720 	do {
4721 		ret_sz += tp1->book_size;
4722 		if (tp1->data != NULL) {
4723 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4724 				sctp_flight_size_decrease(tp1);
4725 				sctp_total_flight_decrease(stcb, tp1);
4726 			}
4727 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4728 			stcb->asoc.peers_rwnd += tp1->send_size;
4729 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4730 			if (sent) {
4731 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4732 			} else {
4733 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4734 			}
4735 			if (tp1->data) {
4736 				sctp_m_freem(tp1->data);
4737 				tp1->data = NULL;
4738 			}
4739 			do_wakeup_routine = 1;
4740 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4741 				stcb->asoc.sent_queue_cnt_removeable--;
4742 			}
4743 		}
4744 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4745 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4746 		    SCTP_DATA_NOT_FRAG) {
4747 			/* not frag'ed, we are done */
4748 			notdone = 0;
4749 			foundeom = 1;
4750 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4751 			/* end of frag, we are done */
4752 			notdone = 0;
4753 			foundeom = 1;
4754 		} else {
4755 			/*
4756 			 * Its a begin or middle piece, we must mark all of
4757 			 * it
4758 			 */
4759 			notdone = 1;
4760 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4761 		}
4762 	} while (tp1 && notdone);
4763 	if (foundeom == 0) {
4764 		/*
4765 		 * The multi-part message was scattered across the send and
4766 		 * sent queue.
4767 		 */
4768 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4769 			if ((tp1->rec.data.stream_number != stream) ||
4770 			    (tp1->rec.data.stream_seq != seq)) {
4771 				break;
4772 			}
4773 			/*
4774 			 * save it in chk in case we have some on the stream
4775 			 * out queue. If so, and we have an un-transmitted
4776 			 * one, we don't have to fudge the TSN.
4777 			 */
4778 			chk = tp1;
4779 			ret_sz += tp1->book_size;
4780 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4781 			if (sent) {
4782 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4783 			} else {
4784 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4785 			}
4786 			if (tp1->data) {
4787 				sctp_m_freem(tp1->data);
4788 				tp1->data = NULL;
4789 			}
4790 			/* No flight involved here; book the size to 0 */
4791 			tp1->book_size = 0;
4792 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4793 				foundeom = 1;
4794 			}
4795 			do_wakeup_routine = 1;
4796 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4797 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4798 			/*
4799 			 * on to the sent queue so we can wait for it to be
4800 			 * passed by.
4801 			 */
4802 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4803 			    sctp_next);
4804 			stcb->asoc.send_queue_cnt--;
4805 			stcb->asoc.sent_queue_cnt++;
4806 		}
4807 	}
4808 	if (foundeom == 0) {
4809 		/*
4810 		 * Still no eom found. That means there is stuff left on the
4811 		 * stream out queue.. yuck.
4812 		 */
4813 		SCTP_TCB_SEND_LOCK(stcb);
4814 		strq = &stcb->asoc.strmout[stream];
4815 		sp = TAILQ_FIRST(&strq->outqueue);
4816 		if (sp != NULL) {
4817 			sp->discard_rest = 1;
4818 			/*
4819 			 * We may need to put a chunk on the queue that
4820 			 * holds the TSN that would have been sent with the
4821 			 * LAST bit.
4822 			 */
4823 			if (chk == NULL) {
4824 				/* Yep, we have to */
4825 				sctp_alloc_a_chunk(stcb, chk);
4826 				if (chk == NULL) {
4827 					/*
4828 					 * we are hosed. All we can do is
4829 					 * nothing.. which will cause an
4830 					 * abort if the peer is paying
4831 					 * attention.
4832 					 */
4833 					goto oh_well;
4834 				}
4835 				memset(chk, 0, sizeof(*chk));
4836 				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4837 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4838 				chk->asoc = &stcb->asoc;
4839 				chk->rec.data.stream_seq = strq->next_sequence_send;
4840 				chk->rec.data.stream_number = sp->stream;
4841 				chk->rec.data.payloadtype = sp->ppid;
4842 				chk->rec.data.context = sp->context;
4843 				chk->flags = sp->act_flags;
4844 				chk->whoTo = NULL;
4845 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4846 				strq->chunks_on_queues++;
4847 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4848 				stcb->asoc.sent_queue_cnt++;
4849 				stcb->asoc.pr_sctp_cnt++;
4850 			} else {
4851 				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4852 			}
4853 			strq->next_sequence_send++;
4854 	oh_well:
4855 			if (sp->data) {
4856 				/*
4857 				 * Pull any data to free up the SB and allow
4858 				 * the sender to "add more" while we throw it
4859 				 * away :-)
4860 				 */
4861 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4862 				ret_sz += sp->length;
4863 				do_wakeup_routine = 1;
4864 				sp->some_taken = 1;
4865 				sctp_m_freem(sp->data);
4866 				sp->data = NULL;
4867 				sp->tail_mbuf = NULL;
4868 				sp->length = 0;
4869 			}
4870 		}
4871 		SCTP_TCB_SEND_UNLOCK(stcb);
4872 	}
4873 	if (do_wakeup_routine) {
4874 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4875 		struct socket *so;
4876 
4877 		so = SCTP_INP_SO(stcb->sctp_ep);
4878 		if (!so_locked) {
4879 			atomic_add_int(&stcb->asoc.refcnt, 1);
4880 			SCTP_TCB_UNLOCK(stcb);
4881 			SCTP_SOCKET_LOCK(so, 1);
4882 			SCTP_TCB_LOCK(stcb);
4883 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4884 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4885 				/* assoc was freed while we were unlocked */
4886 				SCTP_SOCKET_UNLOCK(so, 1);
4887 				return (ret_sz);
4888 			}
4889 		}
4890 #endif
4891 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4892 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4893 		if (!so_locked) {
4894 			SCTP_SOCKET_UNLOCK(so, 1);
4895 		}
4896 #endif
4897 	}
4898 	return (ret_sz);
4899 }
4900 
4901 /*
4902  * checks to see if the given address, sa, is one that is currently known by
4903  * Checks to see if the given address, sa, is one that is currently known by
4904  * the kernel. Note: can't distinguish the same address on multiple interfaces
4905  * and doesn't handle multiple addresses with different zone/scope ids.
4906  * Note: ifa_ifwithaddr() compares the entire sockaddr struct.
4907 struct sctp_ifa *
4908 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4909     int holds_lock)
4910 {
4911 	struct sctp_laddr *laddr;
4912 
4913 	if (holds_lock == 0) {
4914 		SCTP_INP_RLOCK(inp);
4915 	}
4916 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4917 		if (laddr->ifa == NULL)
4918 			continue;
4919 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4920 			continue;
4921 #ifdef INET
4922 		if (addr->sa_family == AF_INET) {
4923 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4924 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4925 				/* found him. */
4926 				if (holds_lock == 0) {
4927 					SCTP_INP_RUNLOCK(inp);
4928 				}
4929 				return (laddr->ifa);
4930 				break;
4931 			}
4932 		}
4933 #endif
4934 #ifdef INET6
4935 		if (addr->sa_family == AF_INET6) {
4936 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4937 			    &laddr->ifa->address.sin6)) {
4938 				/* found him. */
4939 				if (holds_lock == 0) {
4940 					SCTP_INP_RUNLOCK(inp);
4941 				}
4942 				return (laddr->ifa);
4943 				break;
4944 			}
4945 		}
4946 #endif
4947 	}
4948 	if (holds_lock == 0) {
4949 		SCTP_INP_RUNLOCK(inp);
4950 	}
4951 	return (NULL);
4952 }
4953 
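/*
 * Compute the hash value used for the VRF address hash table: for IPv4,
 * fold the address with its upper 16 bits; for IPv6, sum the four 32-bit
 * words of the address and fold the result the same way.
 */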
4954 uint32_t
4955 sctp_get_ifa_hash_val(struct sockaddr *addr)
4956 {
4957 	switch (addr->sa_family) {
4958 #ifdef INET
4959 	case AF_INET:
4960 		{
4961 			struct sockaddr_in *sin;
4962 
4963 			sin = (struct sockaddr_in *)addr;
4964 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4965 		}
4966 #endif
4967 #ifdef INET6
4968 	case AF_INET6:
4969 		{
4970 			struct sockaddr_in6 *sin6;
4971 			uint32_t hash_of_addr;
4972 
4973 			sin6 = (struct sockaddr_in6 *)addr;
4974 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4975 			    sin6->sin6_addr.s6_addr32[1] +
4976 			    sin6->sin6_addr.s6_addr32[2] +
4977 			    sin6->sin6_addr.s6_addr32[3]);
4978 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4979 			return (hash_of_addr);
4980 		}
4981 #endif
4982 	default:
4983 		break;
4984 	}
4985 	return (0);
4986 }
4987 
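/*
 * Look up an address in the given VRF's address hash table and return the
 * matching sctp_ifa, or NULL if the VRF or the address is not known.
 */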
4988 struct sctp_ifa *
4989 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4990 {
4991 	struct sctp_ifa *sctp_ifap;
4992 	struct sctp_vrf *vrf;
4993 	struct sctp_ifalist *hash_head;
4994 	uint32_t hash_of_addr;
4995 
4996 	if (holds_lock == 0)
4997 		SCTP_IPI_ADDR_RLOCK();
4998 
4999 	vrf = sctp_find_vrf(vrf_id);
5000 	if (vrf == NULL) {
5001 		if (holds_lock == 0)
5002 			SCTP_IPI_ADDR_RUNLOCK();
5003 		return (NULL);
5004 	}
5005 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5006 
5007 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5008 	if (hash_head == NULL) {
5009 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5010 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5011 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5012 		sctp_print_address(addr);
5013 		SCTP_PRINTF("No such bucket for address\n");
5014 		if (holds_lock == 0)
5015 			SCTP_IPI_ADDR_RUNLOCK();
5016 
5017 		return (NULL);
5018 	}
5019 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5020 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5021 			continue;
5022 #ifdef INET
5023 		if (addr->sa_family == AF_INET) {
5024 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5025 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5026 				/* found him. */
5027 				if (holds_lock == 0)
5028 					SCTP_IPI_ADDR_RUNLOCK();
5029 				return (sctp_ifap);
5030 				break;
5031 			}
5032 		}
5033 #endif
5034 #ifdef INET6
5035 		if (addr->sa_family == AF_INET6) {
5036 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5037 			    &sctp_ifap->address.sin6)) {
5038 				/* found him. */
5039 				if (holds_lock == 0)
5040 					SCTP_IPI_ADDR_RUNLOCK();
5041 				return (sctp_ifap);
5042 				break;
5043 			}
5044 		}
5045 #endif
5046 	}
5047 	if (holds_lock == 0)
5048 		SCTP_IPI_ADDR_RUNLOCK();
5049 	return (NULL);
5050 }
5051 
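/*
 * Called as the user consumes data: once at least rwnd_req bytes of receive
 * window have been opened up since the last report, send a window update
 * SACK, kick the output path, and stop the recv timer; otherwise just
 * remember how much has been freed so far.
 */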
5052 static void
5053 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
5054     uint32_t rwnd_req)
5055 {
5056 	/* User pulled some data, do we need a rwnd update? */
5057 	int r_unlocked = 0;
5058 	uint32_t dif, rwnd;
5059 	struct socket *so = NULL;
5060 
5061 	if (stcb == NULL)
5062 		return;
5063 
5064 	atomic_add_int(&stcb->asoc.refcnt, 1);
5065 
5066 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5067 	    SCTP_STATE_SHUTDOWN_RECEIVED |
5068 	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5069 		/* Pre-check: if we are freeing, no update */
5070 		goto no_lock;
5071 	}
5072 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5073 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5074 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5075 		goto out;
5076 	}
5077 	so = stcb->sctp_socket;
5078 	if (so == NULL) {
5079 		goto out;
5080 	}
5081 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5082 	/* Have you freed enough to look? */
5083 	*freed_so_far = 0;
5084 	/* Yep, it's worth a look and the lock overhead */
5085 
5086 	/* Figure out what the rwnd would be */
5087 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5088 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5089 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5090 	} else {
5091 		dif = 0;
5092 	}
5093 	if (dif >= rwnd_req) {
5094 		if (hold_rlock) {
5095 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5096 			r_unlocked = 1;
5097 		}
5098 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5099 			/*
5100 			 * One last check before we allow the guy possibly
5101 			 * to get in. There is a race, where the guy has not
5102 			 * reached the gate. In that case, skip the update.
5103 			 */
5104 			goto out;
5105 		}
5106 		SCTP_TCB_LOCK(stcb);
5107 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5108 			/* No reports here */
5109 			SCTP_TCB_UNLOCK(stcb);
5110 			goto out;
5111 		}
5112 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5113 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5114 
5115 		sctp_chunk_output(stcb->sctp_ep, stcb,
5116 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5117 		/* make sure no timer is running */
5118 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5119 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5120 		SCTP_TCB_UNLOCK(stcb);
5121 	} else {
5122 		/* Update how much we have pending */
5123 		stcb->freed_by_sorcv_sincelast = dif;
5124 	}
5125 out:
5126 	if (so && r_unlocked && hold_rlock) {
5127 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5128 	}
5129 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5130 no_lock:
5131 	atomic_add_int(&stcb->asoc.refcnt, -1);
5132 	return;
5133 }
5134 
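/*
 * Receive path for SCTP sockets: pull data and notifications off the
 * endpoint's read queue into uio (or an mbuf chain when mp is non-NULL),
 * fill in sinfo/from when requested, and let sctp_user_rcvd() decide when
 * a window update is due.
 */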
5135 int
5136 sctp_sorecvmsg(struct socket *so,
5137     struct uio *uio,
5138     struct mbuf **mp,
5139     struct sockaddr *from,
5140     int fromlen,
5141     int *msg_flags,
5142     struct sctp_sndrcvinfo *sinfo,
5143     int filling_sinfo)
5144 {
5145 	/*
5146 	 * MSG flags we will look at: MSG_DONTWAIT - non-blocking I/O;
5147 	 * MSG_PEEK - look, don't touch :-D (only valid without an mbuf copy,
5148 	 * i.e. mp == NULL, so uio is the copy method to userland);
5149 	 * MSG_WAITALL - ?? On the way out we may send out any combination
5150 	 * of MSG_NOTIFICATION and MSG_EOR.
5151 	 *
5152 	 */
5153 	struct sctp_inpcb *inp = NULL;
5154 	int my_len = 0;
5155 	int cp_len = 0, error = 0;
5156 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5157 	struct mbuf *m = NULL;
5158 	struct sctp_tcb *stcb = NULL;
5159 	int wakeup_read_socket = 0;
5160 	int freecnt_applied = 0;
5161 	int out_flags = 0, in_flags = 0;
5162 	int block_allowed = 1;
5163 	uint32_t freed_so_far = 0;
5164 	uint32_t copied_so_far = 0;
5165 	int in_eeor_mode = 0;
5166 	int no_rcv_needed = 0;
5167 	uint32_t rwnd_req = 0;
5168 	int hold_sblock = 0;
5169 	int hold_rlock = 0;
5170 	ssize_t slen = 0;
5171 	uint32_t held_length = 0;
5172 	int sockbuf_lock = 0;
5173 
5174 	if (uio == NULL) {
5175 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5176 		return (EINVAL);
5177 	}
5178 	if (msg_flags) {
5179 		in_flags = *msg_flags;
5180 		if (in_flags & MSG_PEEK)
5181 			SCTP_STAT_INCR(sctps_read_peeks);
5182 	} else {
5183 		in_flags = 0;
5184 	}
5185 	slen = uio->uio_resid;
5186 
5187 	/* Pull in and set up our int flags */
5188 	if (in_flags & MSG_OOB) {
5189 		/* Out of band's NOT supported */
5190 		return (EOPNOTSUPP);
5191 	}
5192 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5193 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5194 		return (EINVAL);
5195 	}
5196 	if ((in_flags & (MSG_DONTWAIT
5197 	    | MSG_NBIO
5198 	    )) ||
5199 	    SCTP_SO_IS_NBIO(so)) {
5200 		block_allowed = 0;
5201 	}
5202 	/* setup the endpoint */
5203 	inp = (struct sctp_inpcb *)so->so_pcb;
5204 	if (inp == NULL) {
5205 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5206 		return (EFAULT);
5207 	}
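	/*
	 * rwnd_req is the amount of receive buffer space that must be freed
	 * before it is worth telling the peer about it with a window-update
	 * SACK: a fraction of the socket's receive buffer limit, but never
	 * less than SCTP_MIN_RWND (see the sctp_user_rcvd() calls below).
	 */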
5208 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5209 	/* Must be at least an MTU's worth */
5210 	if (rwnd_req < SCTP_MIN_RWND)
5211 		rwnd_req = SCTP_MIN_RWND;
5212 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5213 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5214 		sctp_misc_ints(SCTP_SORECV_ENTER,
5215 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t) uio->uio_resid);
5216 	}
5217 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5218 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5219 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t) uio->uio_resid);
5220 	}
5221 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5222 	if (error) {
5223 		goto release_unlocked;
5224 	}
5225 	sockbuf_lock = 1;
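	/*
	 * From here on we hold the sockbuf sleep lock, so only one reader
	 * runs the code below at a time.  The restart labels re-evaluate the
	 * socket state and the read queue; the sockbuf mutex is re-acquired
	 * below if it is not already held.
	 */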
5226 restart:
5227 
5228 
5229 restart_nosblocks:
5230 	if (hold_sblock == 0) {
5231 		SOCKBUF_LOCK(&so->so_rcv);
5232 		hold_sblock = 1;
5233 	}
5234 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5235 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5236 		goto out;
5237 	}
5238 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5239 		if (so->so_error) {
5240 			error = so->so_error;
5241 			if ((in_flags & MSG_PEEK) == 0)
5242 				so->so_error = 0;
5243 			goto out;
5244 		} else {
5245 			if (so->so_rcv.sb_cc == 0) {
5246 				/* indicate EOF */
5247 				error = 0;
5248 				goto out;
5249 			}
5250 		}
5251 	}
5252 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5253 		/* we need to wait for data */
5254 		if ((so->so_rcv.sb_cc == 0) &&
5255 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5256 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5257 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5258 				/*
5259 				 * For the active open side, clear the flags
5260 				 * for re-use; a passive open is blocked by
5261 				 * connect.
5262 				 */
5263 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5264 					/*
5265 					 * You were aborted, passive side
5266 					 * always hits here
5267 					 */
5268 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5269 					error = ECONNRESET;
5270 				}
5271 				so->so_state &= ~(SS_ISCONNECTING |
5272 				    SS_ISDISCONNECTING |
5273 				    SS_ISCONFIRMING |
5274 				    SS_ISCONNECTED);
5275 				if (error == 0) {
5276 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5277 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5278 						error = ENOTCONN;
5279 					}
5280 				}
5281 				goto out;
5282 			}
5283 		}
5284 		error = sbwait(&so->so_rcv);
5285 		if (error) {
5286 			goto out;
5287 		}
5288 		held_length = 0;
5289 		goto restart_nosblocks;
5290 	} else if (so->so_rcv.sb_cc == 0) {
5291 		if (so->so_error) {
5292 			error = so->so_error;
5293 			if ((in_flags & MSG_PEEK) == 0)
5294 				so->so_error = 0;
5295 		} else {
5296 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5297 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5298 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5299 					/*
5300 					 * For the active open side, clear the
5301 					 * flags for re-use; a passive open is
5302 					 * blocked by connect.
5303 					 */
5304 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5305 						/*
5306 						 * You were aborted, passive
5307 						 * side always hits here
5308 						 */
5309 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5310 						error = ECONNRESET;
5311 					}
5312 					so->so_state &= ~(SS_ISCONNECTING |
5313 					    SS_ISDISCONNECTING |
5314 					    SS_ISCONFIRMING |
5315 					    SS_ISCONNECTED);
5316 					if (error == 0) {
5317 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5318 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5319 							error = ENOTCONN;
5320 						}
5321 					}
5322 					goto out;
5323 				}
5324 			}
5325 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5326 			error = EWOULDBLOCK;
5327 		}
5328 		goto out;
5329 	}
5330 	if (hold_sblock == 1) {
5331 		SOCKBUF_UNLOCK(&so->so_rcv);
5332 		hold_sblock = 0;
5333 	}
5334 	/* we possibly have data we can read */
5335 	/* sa_ignore FREED_MEMORY */
5336 	control = TAILQ_FIRST(&inp->read_queue);
5337 	if (control == NULL) {
5338 		/*
5339 		 * This could be happening since the appender did the
5340 		 * increment but has not yet done the tailq insert onto the
5341 		 * read_queue
5342 		 */
5343 		if (hold_rlock == 0) {
5344 			SCTP_INP_READ_LOCK(inp);
5345 		}
5346 		control = TAILQ_FIRST(&inp->read_queue);
5347 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5348 #ifdef INVARIANTS
5349 			panic("Huh, its non zero and nothing on control?");
5350 #endif
5351 			so->so_rcv.sb_cc = 0;
5352 		}
5353 		SCTP_INP_READ_UNLOCK(inp);
5354 		hold_rlock = 0;
5355 		goto restart;
5356 	}
5357 	if ((control->length == 0) &&
5358 	    (control->do_not_ref_stcb)) {
5359 		/*
5360 		 * Clean-up code for freeing an assoc that left behind a
5361 		 * pdapi... maybe a peer in EEOR mode that just closed after
5362 		 * sending and never indicated an EOR.
5363 		 */
5364 		if (hold_rlock == 0) {
5365 			hold_rlock = 1;
5366 			SCTP_INP_READ_LOCK(inp);
5367 		}
5368 		control->held_length = 0;
5369 		if (control->data) {
5370 			/* Hmm, there is data here... fix it up */
5371 			struct mbuf *m_tmp;
5372 			int cnt = 0;
5373 
5374 			m_tmp = control->data;
5375 			while (m_tmp) {
5376 				cnt += SCTP_BUF_LEN(m_tmp);
5377 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5378 					control->tail_mbuf = m_tmp;
5379 					control->end_added = 1;
5380 				}
5381 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5382 			}
5383 			control->length = cnt;
5384 		} else {
5385 			/* remove it */
5386 			TAILQ_REMOVE(&inp->read_queue, control, next);
5387 			/* Add back any hidden data */
5388 			sctp_free_remote_addr(control->whoFrom);
5389 			sctp_free_a_readq(stcb, control);
5390 		}
5391 		if (hold_rlock) {
5392 			hold_rlock = 0;
5393 			SCTP_INP_READ_UNLOCK(inp);
5394 		}
5395 		goto restart;
5396 	}
5397 	if ((control->length == 0) &&
5398 	    (control->end_added == 1)) {
5399 		/*
5400 		 * Do we also need to check for (control->pdapi_aborted ==
5401 		 * 1)?
5402 		 */
5403 		if (hold_rlock == 0) {
5404 			hold_rlock = 1;
5405 			SCTP_INP_READ_LOCK(inp);
5406 		}
5407 		TAILQ_REMOVE(&inp->read_queue, control, next);
5408 		if (control->data) {
5409 #ifdef INVARIANTS
5410 			panic("control->data not null but control->length == 0");
5411 #else
5412 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5413 			sctp_m_freem(control->data);
5414 			control->data = NULL;
5415 #endif
5416 		}
5417 		if (control->aux_data) {
5418 			sctp_m_free(control->aux_data);
5419 			control->aux_data = NULL;
5420 		}
5421 #ifdef INVARIANTS
5422 		if (control->on_strm_q) {
5423 			panic("About to free ctl:%p so:%p and its in %d",
5424 			    control, so, control->on_strm_q);
5425 		}
5426 #endif
5427 		sctp_free_remote_addr(control->whoFrom);
5428 		sctp_free_a_readq(stcb, control);
5429 		if (hold_rlock) {
5430 			hold_rlock = 0;
5431 			SCTP_INP_READ_UNLOCK(inp);
5432 		}
5433 		goto restart;
5434 	}
5435 	if (control->length == 0) {
5436 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5437 		    (filling_sinfo)) {
5438 			/* find a more suitable one than this */
5439 			ctl = TAILQ_NEXT(control, next);
5440 			while (ctl) {
5441 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5442 				    (ctl->some_taken ||
5443 				    (ctl->spec_flags & M_NOTIFICATION) ||
5444 				    ((ctl->do_not_ref_stcb == 0) &&
5445 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5446 				    ) {
5447 					/*-
5448 					 * If we have a different TCB next and there is data
5449 					 * present, and either we have already taken some (pdapi),
5450 					 * or we can ref the tcb and no delivery has started on
5451 					 * this stream, we take it. Note we allow a notification
5452 					 * on a different assoc to be delivered.
5453 					 */
5454 					control = ctl;
5455 					goto found_one;
5456 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5457 					    (ctl->length) &&
5458 					    ((ctl->some_taken) ||
5459 					    ((ctl->do_not_ref_stcb == 0) &&
5460 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5461 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5462 					/*-
5463 					 * If we have the same tcb, and there is data present, and we
5464 					 * have the strm interleave feature present, then if we have
5465 					 * taken some (pdapi) or we can refer to that tcb AND we have
5466 					 * not started a delivery for this stream, we can take it.
5467 					 * Note we do NOT allow a notification on the same assoc to
5468 					 * be delivered.
5469 					 */
5470 					control = ctl;
5471 					goto found_one;
5472 				}
5473 				ctl = TAILQ_NEXT(ctl, next);
5474 			}
5475 		}
5476 		/*
5477 		 * if we reach here, no suitable replacement is available
5478 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5479 		 * into our held count, and it's time to sleep again.
5480 		 */
5481 		held_length = so->so_rcv.sb_cc;
5482 		control->held_length = so->so_rcv.sb_cc;
5483 		goto restart;
5484 	}
5485 	/* Clear the held length since there is something to read */
5486 	control->held_length = 0;
5487 	if (hold_rlock) {
5488 		SCTP_INP_READ_UNLOCK(inp);
5489 		hold_rlock = 0;
5490 	}
5491 found_one:
5492 	/*
5493 	 * If we reach here, control has some data for us to read off.
5494 	 * Note that stcb COULD be NULL.
5495 	 */
5496 	control->some_taken++;
5497 	if (hold_sblock) {
5498 		SOCKBUF_UNLOCK(&so->so_rcv);
5499 		hold_sblock = 0;
5500 	}
5501 	stcb = control->stcb;
5502 	if (stcb) {
5503 		if ((control->do_not_ref_stcb == 0) &&
5504 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5505 			if (freecnt_applied == 0)
5506 				stcb = NULL;
5507 		} else if (control->do_not_ref_stcb == 0) {
5508 			/* you can't free it on me please */
5509 			/*
5510 			 * The lock on the socket buffer protects us so the
5511 			 * free code will stop. But since we used the
5512 			 * socketbuf lock and the sender uses the tcb_lock
5513 			 * to increment, we need to use the atomic add to
5514 			 * the refcnt
5515 			 */
5516 			if (freecnt_applied) {
5517 #ifdef INVARIANTS
5518 				panic("refcnt already incremented");
5519 #else
5520 				SCTP_PRINTF("refcnt already incremented?\n");
5521 #endif
5522 			} else {
5523 				atomic_add_int(&stcb->asoc.refcnt, 1);
5524 				freecnt_applied = 1;
5525 			}
5526 			/*
5527 			 * Setup to remember how much we have not yet told
5528 			 * the peer our rwnd has opened up. Note we grab the
5529 			 * value from the tcb from last time. Note too that
5530 			 * sack sending clears this when a sack is sent,
5531 			 * which is fine. Once we hit the rwnd_req, we will
5532 			 * then go to sctp_user_rcvd(), which will not take
5533 			 * the lock until it KNOWS it MUST send a WUP-SACK.
5534 			 */
5535 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5536 			stcb->freed_by_sorcv_sincelast = 0;
5537 		}
5538 	}
5539 	if (stcb &&
5540 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5541 	    control->do_not_ref_stcb == 0) {
5542 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5543 	}
5544 	/* First lets get off the sinfo and sockaddr info */
5545 	if ((sinfo) && filling_sinfo) {
5546 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5547 		nxt = TAILQ_NEXT(control, next);
5548 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5549 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5550 			struct sctp_extrcvinfo *s_extra;
5551 
5552 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5553 			if ((nxt) &&
5554 			    (nxt->length)) {
5555 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5556 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5557 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5558 				}
5559 				if (nxt->spec_flags & M_NOTIFICATION) {
5560 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5561 				}
5562 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5563 				s_extra->serinfo_next_length = nxt->length;
5564 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5565 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5566 				if (nxt->tail_mbuf != NULL) {
5567 					if (nxt->end_added) {
5568 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5569 					}
5570 				}
5571 			} else {
5572 				/*
5573 				 * we explicitly zero these, since the memcpy
5574 				 * copied more than just the older sinfo_
5575 				 * fields that are present on the control
5576 				 * structure.
5577 				 */
5578 				nxt = NULL;
5579 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5580 				s_extra->serinfo_next_aid = 0;
5581 				s_extra->serinfo_next_length = 0;
5582 				s_extra->serinfo_next_ppid = 0;
5583 				s_extra->serinfo_next_stream = 0;
5584 			}
5585 		}
5586 		/*
5587 		 * update off the real current cum-ack, if we have an stcb.
5588 		 */
5589 		if ((control->do_not_ref_stcb == 0) && stcb)
5590 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5591 		/*
5592 		 * mask off the high bits, we keep the actual chunk bits in
5593 		 * there.
5594 		 */
5595 		sinfo->sinfo_flags &= 0x00ff;
5596 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5597 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5598 		}
5599 	}
5600 #ifdef SCTP_ASOCLOG_OF_TSNS
5601 	{
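		/*
		 * Record what is being handed to the reader in the endpoint's
		 * read log ring; the atomic_cmpset_int() loop claims the next
		 * slot without holding a lock.
		 */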
5602 		int index, newindex;
5603 		struct sctp_pcbtsn_rlog *entry;
5604 
5605 		do {
5606 			index = inp->readlog_index;
5607 			newindex = index + 1;
5608 			if (newindex >= SCTP_READ_LOG_SIZE) {
5609 				newindex = 0;
5610 			}
5611 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5612 		entry = &inp->readlog[index];
5613 		entry->vtag = control->sinfo_assoc_id;
5614 		entry->strm = control->sinfo_stream;
5615 		entry->seq = control->sinfo_ssn;
5616 		entry->sz = control->length;
5617 		entry->flgs = control->sinfo_flags;
5618 	}
5619 #endif
5620 	if ((fromlen > 0) && (from != NULL)) {
5621 		union sctp_sockstore store;
5622 		size_t len;
5623 
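		/*
		 * Copy the sender's address out to the caller, converting an
		 * IPv4 address to a v4-mapped IPv6 address when the socket
		 * asks for mapped addresses, and truncating to fromlen.
		 */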
5624 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5625 #ifdef INET6
5626 		case AF_INET6:
5627 			len = sizeof(struct sockaddr_in6);
5628 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5629 			store.sin6.sin6_port = control->port_from;
5630 			break;
5631 #endif
5632 #ifdef INET
5633 		case AF_INET:
5634 #ifdef INET6
5635 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5636 				len = sizeof(struct sockaddr_in6);
5637 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5638 				    &store.sin6);
5639 				store.sin6.sin6_port = control->port_from;
5640 			} else {
5641 				len = sizeof(struct sockaddr_in);
5642 				store.sin = control->whoFrom->ro._l_addr.sin;
5643 				store.sin.sin_port = control->port_from;
5644 			}
5645 #else
5646 			len = sizeof(struct sockaddr_in);
5647 			store.sin = control->whoFrom->ro._l_addr.sin;
5648 			store.sin.sin_port = control->port_from;
5649 #endif
5650 			break;
5651 #endif
5652 		default:
5653 			len = 0;
5654 			break;
5655 		}
5656 		memcpy(from, &store, min((size_t)fromlen, len));
5657 #ifdef INET6
5658 		{
5659 			struct sockaddr_in6 lsa6, *from6;
5660 
5661 			from6 = (struct sockaddr_in6 *)from;
5662 			sctp_recover_scope_mac(from6, (&lsa6));
5663 		}
5664 #endif
5665 	}
5666 	/* now copy out what data we can */
5667 	if (mp == NULL) {
5668 		/* copy out each mbuf in the chain up to length */
5669 get_more_data:
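		/*
		 * Copy-out loop: move each mbuf's data to the user via
		 * uiomove().  Unless MSG_PEEK is set, consumed mbufs are freed
		 * and the socket buffer accounting is adjusted; once enough
		 * has been freed (rwnd_req) we call sctp_user_rcvd() so a
		 * window update can be sent.
		 */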
5670 		m = control->data;
5671 		while (m) {
5672 			/* Move out all we can */
5673 			cp_len = (int)uio->uio_resid;
5674 			my_len = (int)SCTP_BUF_LEN(m);
5675 			if (cp_len > my_len) {
5676 				/* not enough in this buf */
5677 				cp_len = my_len;
5678 			}
5679 			if (hold_rlock) {
5680 				SCTP_INP_READ_UNLOCK(inp);
5681 				hold_rlock = 0;
5682 			}
5683 			if (cp_len > 0)
5684 				error = uiomove(mtod(m, char *), cp_len, uio);
5685 			/* re-read */
5686 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5687 				goto release;
5688 			}
5689 			if ((control->do_not_ref_stcb == 0) && stcb &&
5690 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5691 				no_rcv_needed = 1;
5692 			}
5693 			if (error) {
5694 				/* error we are out of here */
5695 				goto release;
5696 			}
5697 			SCTP_INP_READ_LOCK(inp);
5698 			hold_rlock = 1;
5699 			if (cp_len == SCTP_BUF_LEN(m)) {
5700 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5701 				    (control->end_added)) {
5702 					out_flags |= MSG_EOR;
5703 					if ((control->do_not_ref_stcb == 0) &&
5704 					    (control->stcb != NULL) &&
5705 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5706 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5707 				}
5708 				if (control->spec_flags & M_NOTIFICATION) {
5709 					out_flags |= MSG_NOTIFICATION;
5710 				}
5711 				/* we ate up the mbuf */
5712 				if (in_flags & MSG_PEEK) {
5713 					/* just looking */
5714 					m = SCTP_BUF_NEXT(m);
5715 					copied_so_far += cp_len;
5716 				} else {
5717 					/* dispose of the mbuf */
5718 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5719 						sctp_sblog(&so->so_rcv,
5720 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5721 					}
5722 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5723 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5724 						sctp_sblog(&so->so_rcv,
5725 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5726 					}
5727 					copied_so_far += cp_len;
5728 					freed_so_far += cp_len;
5729 					freed_so_far += MSIZE;
5730 					atomic_subtract_int(&control->length, cp_len);
5731 					control->data = sctp_m_free(m);
5732 					m = control->data;
5733 					/*
5734 					 * been through it all; we must hold the
5735 					 * sb lock, so it is ok to NULL the tail
5736 					 */
5737 					if (control->data == NULL) {
5738 #ifdef INVARIANTS
5739 						if ((control->end_added == 0) ||
5740 						    (TAILQ_NEXT(control, next) == NULL)) {
5741 							/*
5742 							 * If the end is not
5743 							 * added, OR the
5744 							 * next is NOT null
5745 							 * we MUST have the
5746 							 * lock.
5747 							 */
5748 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5749 								panic("Hmm we don't own the lock?");
5750 							}
5751 						}
5752 #endif
5753 						control->tail_mbuf = NULL;
5754 #ifdef INVARIANTS
5755 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5756 							panic("end_added, nothing left and no MSG_EOR");
5757 						}
5758 #endif
5759 					}
5760 				}
5761 			} else {
5762 				/* Do we need to trim the mbuf? */
5763 				if (control->spec_flags & M_NOTIFICATION) {
5764 					out_flags |= MSG_NOTIFICATION;
5765 				}
5766 				if ((in_flags & MSG_PEEK) == 0) {
5767 					SCTP_BUF_RESV_UF(m, cp_len);
5768 					SCTP_BUF_LEN(m) -= cp_len;
5769 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5770 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5771 					}
5772 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5773 					if ((control->do_not_ref_stcb == 0) &&
5774 					    stcb) {
5775 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5776 					}
5777 					copied_so_far += cp_len;
5778 					freed_so_far += cp_len;
5779 					freed_so_far += MSIZE;
5780 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5781 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5782 						    SCTP_LOG_SBRESULT, 0);
5783 					}
5784 					atomic_subtract_int(&control->length, cp_len);
5785 				} else {
5786 					copied_so_far += cp_len;
5787 				}
5788 			}
5789 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5790 				break;
5791 			}
5792 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5793 			    (control->do_not_ref_stcb == 0) &&
5794 			    (freed_so_far >= rwnd_req)) {
5795 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5796 			}
5797 		}		/* end while(m) */
5798 		/*
5799 		 * At this point we have looked at it all and we either have
5800 		 * a MSG_EOR, or read all the user wants... <OR>
5801 		 * control->length == 0.
5802 		 */
5803 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5804 			/* we are done with this control */
5805 			if (control->length == 0) {
5806 				if (control->data) {
5807 #ifdef INVARIANTS
5808 					panic("control->data not null at read eor?");
5809 #else
5810 					SCTP_PRINTF("Strange, data left in the control buffer... invariants would panic?\n");
5811 					sctp_m_freem(control->data);
5812 					control->data = NULL;
5813 #endif
5814 				}
5815 		done_with_control:
5816 				if (hold_rlock == 0) {
5817 					SCTP_INP_READ_LOCK(inp);
5818 					hold_rlock = 1;
5819 				}
5820 				TAILQ_REMOVE(&inp->read_queue, control, next);
5821 				/* Add back any hidden data */
5822 				if (control->held_length) {
5823 					held_length = 0;
5824 					control->held_length = 0;
5825 					wakeup_read_socket = 1;
5826 				}
5827 				if (control->aux_data) {
5828 					sctp_m_free(control->aux_data);
5829 					control->aux_data = NULL;
5830 				}
5831 				no_rcv_needed = control->do_not_ref_stcb;
5832 				sctp_free_remote_addr(control->whoFrom);
5833 				control->data = NULL;
5834 #ifdef INVARIANTS
5835 				if (control->on_strm_q) {
5836 					panic("About to free ctl:%p so:%p and its in %d",
5837 					    control, so, control->on_strm_q);
5838 				}
5839 #endif
5840 				sctp_free_a_readq(stcb, control);
5841 				control = NULL;
5842 				if ((freed_so_far >= rwnd_req) &&
5843 				    (no_rcv_needed == 0))
5844 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5845 
5846 			} else {
5847 				/*
5848 				 * The user did not read all of this
5849 				 * message, turn off the returned MSG_EOR
5850 				 * since we are leaving more behind on the
5851 				 * control to read.
5852 				 */
5853 #ifdef INVARIANTS
5854 				if (control->end_added &&
5855 				    (control->data == NULL) &&
5856 				    (control->tail_mbuf == NULL)) {
5857 					panic("Gak, control->length is corrupt?");
5858 				}
5859 #endif
5860 				no_rcv_needed = control->do_not_ref_stcb;
5861 				out_flags &= ~MSG_EOR;
5862 			}
5863 		}
5864 		if (out_flags & MSG_EOR) {
5865 			goto release;
5866 		}
5867 		if ((uio->uio_resid == 0) ||
5868 		    ((in_eeor_mode) &&
5869 		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5870 			goto release;
5871 		}
5872 		/*
5873 		 * If we hit here, the receiver wants more and this message is
5874 		 * NOT done (pd-api). So two questions: can we block? If not,
5875 		 * we are done. Did the user NOT set MSG_WAITALL?
5876 		 */
5877 		if (block_allowed == 0) {
5878 			goto release;
5879 		}
5880 		/*
5881 		 * We need to wait for more data. A few things: - We don't
5882 		 * sbunlock(), so no one else can start reading. - We
5883 		 * must be sure to account for the case where what is added
5884 		 * is NOT for our control when we wake up.
5885 		 */
5886 
5887 		/*
5888 		 * Do we need to tell the transport a rwnd update might be
5889 		 * needed before we go to sleep?
5890 		 */
5891 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5892 		    ((freed_so_far >= rwnd_req) &&
5893 		    (control->do_not_ref_stcb == 0) &&
5894 		    (no_rcv_needed == 0))) {
5895 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5896 		}
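		/*
		 * Sleep on the socket buffer until either more data lands on
		 * this control, the message completes (end_added), or the
		 * socket can no longer receive.
		 */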
5897 wait_some_more:
5898 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5899 			goto release;
5900 		}
5901 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5902 			goto release;
5903 
5904 		if (hold_rlock == 1) {
5905 			SCTP_INP_READ_UNLOCK(inp);
5906 			hold_rlock = 0;
5907 		}
5908 		if (hold_sblock == 0) {
5909 			SOCKBUF_LOCK(&so->so_rcv);
5910 			hold_sblock = 1;
5911 		}
5912 		if ((copied_so_far) && (control->length == 0) &&
5913 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5914 			goto release;
5915 		}
5916 		if (so->so_rcv.sb_cc <= control->held_length) {
5917 			error = sbwait(&so->so_rcv);
5918 			if (error) {
5919 				goto release;
5920 			}
5921 			control->held_length = 0;
5922 		}
5923 		if (hold_sblock) {
5924 			SOCKBUF_UNLOCK(&so->so_rcv);
5925 			hold_sblock = 0;
5926 		}
5927 		if (control->length == 0) {
5928 			/* still nothing here */
5929 			if (control->end_added == 1) {
5930 				/* the peer aborted, or is done, i.e. did a shutdown */
5931 				out_flags |= MSG_EOR;
5932 				if (control->pdapi_aborted) {
5933 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5934 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5935 
5936 					out_flags |= MSG_TRUNC;
5937 				} else {
5938 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5939 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5940 				}
5941 				goto done_with_control;
5942 			}
5943 			if (so->so_rcv.sb_cc > held_length) {
5944 				control->held_length = so->so_rcv.sb_cc;
5945 				held_length = 0;
5946 			}
5947 			goto wait_some_more;
5948 		} else if (control->data == NULL) {
5949 			/*
5950 			 * we must re-sync since data is probably being
5951 			 * added
5952 			 */
5953 			SCTP_INP_READ_LOCK(inp);
5954 			if ((control->length > 0) && (control->data == NULL)) {
5955 				/*
5956 				 * big trouble... we have the lock and it's
5957 				 * corrupt?
5958 				 */
5959 #ifdef INVARIANTS
5960 				panic("Impossible data==NULL length !=0");
5961 #endif
5962 				out_flags |= MSG_EOR;
5963 				out_flags |= MSG_TRUNC;
5964 				control->length = 0;
5965 				SCTP_INP_READ_UNLOCK(inp);
5966 				goto done_with_control;
5967 			}
5968 			SCTP_INP_READ_UNLOCK(inp);
5969 			/* We will fall around to get more data */
5970 		}
5971 		goto get_more_data;
5972 	} else {
5973 		/*-
5974 		 * Give caller back the mbuf chain,
5975 		 * store in uio_resid the length
5976 		 */
5977 		wakeup_read_socket = 0;
5978 		if ((control->end_added == 0) ||
5979 		    (TAILQ_NEXT(control, next) == NULL)) {
5980 			/* Need to get rlock */
5981 			if (hold_rlock == 0) {
5982 				SCTP_INP_READ_LOCK(inp);
5983 				hold_rlock = 1;
5984 			}
5985 		}
5986 		if (control->end_added) {
5987 			out_flags |= MSG_EOR;
5988 			if ((control->do_not_ref_stcb == 0) &&
5989 			    (control->stcb != NULL) &&
5990 			    ((control->spec_flags & M_NOTIFICATION) == 0))
5991 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5992 		}
5993 		if (control->spec_flags & M_NOTIFICATION) {
5994 			out_flags |= MSG_NOTIFICATION;
5995 		}
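		/*
		 * Hand the whole mbuf chain back to the caller.  We still walk
		 * it to remove the bytes from the socket buffer accounting
		 * (sctp_sbfree) and to track how much was freed for the rwnd
		 * update done at release time.
		 */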
5996 		uio->uio_resid = control->length;
5997 		*mp = control->data;
5998 		m = control->data;
5999 		while (m) {
6000 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6001 				sctp_sblog(&so->so_rcv,
6002 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6003 			}
6004 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6005 			freed_so_far += SCTP_BUF_LEN(m);
6006 			freed_so_far += MSIZE;
6007 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6008 				sctp_sblog(&so->so_rcv,
6009 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6010 			}
6011 			m = SCTP_BUF_NEXT(m);
6012 		}
6013 		control->data = control->tail_mbuf = NULL;
6014 		control->length = 0;
6015 		if (out_flags & MSG_EOR) {
6016 			/* Done with this control */
6017 			goto done_with_control;
6018 		}
6019 	}
6020 release:
6021 	if (hold_rlock == 1) {
6022 		SCTP_INP_READ_UNLOCK(inp);
6023 		hold_rlock = 0;
6024 	}
6025 	if (hold_sblock == 1) {
6026 		SOCKBUF_UNLOCK(&so->so_rcv);
6027 		hold_sblock = 0;
6028 	}
6029 	sbunlock(&so->so_rcv);
6030 	sockbuf_lock = 0;
6031 
6032 release_unlocked:
6033 	if (hold_sblock) {
6034 		SOCKBUF_UNLOCK(&so->so_rcv);
6035 		hold_sblock = 0;
6036 	}
6037 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6038 		if ((freed_so_far >= rwnd_req) &&
6039 		    (control && (control->do_not_ref_stcb == 0)) &&
6040 		    (no_rcv_needed == 0))
6041 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6042 	}
6043 out:
6044 	if (msg_flags) {
6045 		*msg_flags = out_flags;
6046 	}
6047 	if (((out_flags & MSG_EOR) == 0) &&
6048 	    ((in_flags & MSG_PEEK) == 0) &&
6049 	    (sinfo) &&
6050 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6051 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6052 		struct sctp_extrcvinfo *s_extra;
6053 
6054 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6055 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6056 	}
6057 	if (hold_rlock == 1) {
6058 		SCTP_INP_READ_UNLOCK(inp);
6059 	}
6060 	if (hold_sblock) {
6061 		SOCKBUF_UNLOCK(&so->so_rcv);
6062 	}
6063 	if (sockbuf_lock) {
6064 		sbunlock(&so->so_rcv);
6065 	}
6066 	if (freecnt_applied) {
6067 		/*
6068 		 * The lock on the socket buffer protects us so the free
6069 		 * code will stop. But since we used the socketbuf lock and
6070 		 * the sender uses the tcb_lock to increment, we need to use
6071 		 * the atomic add to the refcnt.
6072 		 */
6073 		if (stcb == NULL) {
6074 #ifdef INVARIANTS
6075 			panic("stcb for refcnt has gone NULL?");
6076 			goto stage_left;
6077 #else
6078 			goto stage_left;
6079 #endif
6080 		}
6081 		atomic_add_int(&stcb->asoc.refcnt, -1);
6082 		/* Save the value back for next time */
6083 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6084 	}
6085 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6086 		if (stcb) {
6087 			sctp_misc_ints(SCTP_SORECV_DONE,
6088 			    freed_so_far,
6089 			    (uint32_t) ((uio) ? (slen - uio->uio_resid) : slen),
6090 			    stcb->asoc.my_rwnd,
6091 			    so->so_rcv.sb_cc);
6092 		} else {
6093 			sctp_misc_ints(SCTP_SORECV_DONE,
6094 			    freed_so_far,
6095 			    (uint32_t) ((uio) ? (slen - uio->uio_resid) : slen),
6096 			    0,
6097 			    so->so_rcv.sb_cc);
6098 		}
6099 	}
6100 stage_left:
6101 	if (wakeup_read_socket) {
6102 		sctp_sorwakeup(inp, so);
6103 	}
6104 	return (error);
6105 }
6106 
6107 
6108 #ifdef SCTP_MBUF_LOGGING
6109 struct mbuf *
6110 sctp_m_free(struct mbuf *m)
6111 {
6112 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6113 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6114 	}
6115 	return (m_free(m));
6116 }
6117 
6118 void
6119 sctp_m_freem(struct mbuf *mb)
6120 {
6121 	while (mb != NULL)
6122 		mb = sctp_m_free(mb);
6123 }
6124 
6125 #endif
6126 
6127 int
6128 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6129 {
6130 	/*
6131 	 * Given a local address, request a peer-set-primary for all
6132 	 * associations that hold the address.
6133 	 */
6134 	struct sctp_ifa *ifa;
6135 	struct sctp_laddr *wi;
6136 
6137 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6138 	if (ifa == NULL) {
6139 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6140 		return (EADDRNOTAVAIL);
6141 	}
6142 	/*
6143 	 * Now that we have the ifa we must awaken the iterator with this
6144 	 * message.
6145 	 */
6146 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6147 	if (wi == NULL) {
6148 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6149 		return (ENOMEM);
6150 	}
6151 	/* Now incr the count and init the wi structure */
6152 	SCTP_INCR_LADDR_COUNT();
6153 	bzero(wi, sizeof(*wi));
6154 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6155 	wi->ifa = ifa;
6156 	wi->action = SCTP_SET_PRIM_ADDR;
6157 	atomic_add_int(&ifa->refcount, 1);
6158 
6159 	/* Now add it to the work queue */
6160 	SCTP_WQ_ADDR_LOCK();
6161 	/*
6162 	 * Should this really be a tailq? As it is we will process the
6163 	 * newest first :-0
6164 	 */
6165 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6166 	SCTP_WQ_ADDR_UNLOCK();
6167 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6168 	    (struct sctp_inpcb *)NULL,
6169 	    (struct sctp_tcb *)NULL,
6170 	    (struct sctp_nets *)NULL);
6171 	return (0);
6172 }
6173 
6174 
6175 int
6176 sctp_soreceive(struct socket *so,
6177     struct sockaddr **psa,
6178     struct uio *uio,
6179     struct mbuf **mp0,
6180     struct mbuf **controlp,
6181     int *flagsp)
6182 {
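	/*
	 * Protocol-level soreceive hook: gather the peer address and the
	 * sctp_sndrcvinfo while receiving, then hand them back to the
	 * caller as a sockaddr and a control-message mbuf built by
	 * sctp_build_ctl_nchunk().
	 */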
6183 	int error, fromlen;
6184 	uint8_t sockbuf[256];
6185 	struct sockaddr *from;
6186 	struct sctp_extrcvinfo sinfo;
6187 	int filling_sinfo = 1;
6188 	struct sctp_inpcb *inp;
6189 
6190 	inp = (struct sctp_inpcb *)so->so_pcb;
6191 	/* pick up the assoc we are reading from */
6192 	if (inp == NULL) {
6193 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6194 		return (EINVAL);
6195 	}
6196 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6197 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6198 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6199 	    (controlp == NULL)) {
6200 		/* user does not want the sndrcv ctl */
6201 		filling_sinfo = 0;
6202 	}
6203 	if (psa) {
6204 		from = (struct sockaddr *)sockbuf;
6205 		fromlen = sizeof(sockbuf);
6206 		from->sa_len = 0;
6207 	} else {
6208 		from = NULL;
6209 		fromlen = 0;
6210 	}
6211 
6212 	if (filling_sinfo) {
6213 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6214 	}
6215 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6216 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6217 	if (controlp != NULL) {
6218 		/* copy back the sinfo in a CMSG format */
6219 		if (filling_sinfo)
6220 			*controlp = sctp_build_ctl_nchunk(inp,
6221 			    (struct sctp_sndrcvinfo *)&sinfo);
6222 		else
6223 			*controlp = NULL;
6224 	}
6225 	if (psa) {
6226 		/* copy back the address info */
6227 		if (from && from->sa_len) {
6228 			*psa = sodupsockaddr(from, M_NOWAIT);
6229 		} else {
6230 			*psa = NULL;
6231 		}
6232 	}
6233 	return (error);
6234 }
6235 
6236 
6237 
6238 
6239 
6240 int
6241 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6242     int totaddr, int *error)
6243 {
6244 	int added = 0;
6245 	int i;
6246 	struct sctp_inpcb *inp;
6247 	struct sockaddr *sa;
6248 	size_t incr = 0;
6249 
6250 #ifdef INET
6251 	struct sockaddr_in *sin;
6252 
6253 #endif
6254 #ifdef INET6
6255 	struct sockaddr_in6 *sin6;
6256 
6257 #endif
6258 
6259 	sa = addr;
6260 	inp = stcb->sctp_ep;
6261 	*error = 0;
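	/*
	 * addr points at a packed array of totaddr sockaddrs.  Validate each
	 * one and add it as a remote address of the association; on any
	 * failure the association is freed and *error is set.
	 */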
6262 	for (i = 0; i < totaddr; i++) {
6263 		switch (sa->sa_family) {
6264 #ifdef INET
6265 		case AF_INET:
6266 			incr = sizeof(struct sockaddr_in);
6267 			sin = (struct sockaddr_in *)sa;
6268 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6269 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6270 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6271 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6272 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6273 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6274 				*error = EINVAL;
6275 				goto out_now;
6276 			}
6277 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6278 				/* assoc is gone; no unlock needed */
6279 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6280 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6281 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6282 				*error = ENOBUFS;
6283 				goto out_now;
6284 			}
6285 			added++;
6286 			break;
6287 #endif
6288 #ifdef INET6
6289 		case AF_INET6:
6290 			incr = sizeof(struct sockaddr_in6);
6291 			sin6 = (struct sockaddr_in6 *)sa;
6292 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6293 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6294 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6295 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6296 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6297 				*error = EINVAL;
6298 				goto out_now;
6299 			}
6300 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6301 				/* assoc is gone; no unlock needed */
6302 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6303 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6304 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6305 				*error = ENOBUFS;
6306 				goto out_now;
6307 			}
6308 			added++;
6309 			break;
6310 #endif
6311 		default:
6312 			break;
6313 		}
6314 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6315 	}
6316 out_now:
6317 	return (added);
6318 }
6319 
6320 struct sctp_tcb *
6321 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6322     unsigned int *totaddr,
6323     unsigned int *num_v4, unsigned int *num_v6, int *error,
6324     unsigned int limit, int *bad_addr)
6325 {
6326 	struct sockaddr *sa;
6327 	struct sctp_tcb *stcb = NULL;
6328 	unsigned int incr, at, i;
6329 
6330 	at = incr = 0;
6331 	sa = addr;
6332 	*error = *num_v6 = *num_v4 = 0;
6333 	/* account and validate addresses */
6334 	for (i = 0; i < *totaddr; i++) {
6335 		switch (sa->sa_family) {
6336 #ifdef INET
6337 		case AF_INET:
6338 			if (sa->sa_len != incr) {
6339 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6340 				*error = EINVAL;
6341 				*bad_addr = 1;
6342 				return (NULL);
6343 			}
6344 			(*num_v4) += 1;
6345 			incr = (unsigned int)sizeof(struct sockaddr_in);
6346 			break;
6347 #endif
6348 #ifdef INET6
6349 		case AF_INET6:
6350 			{
6351 				struct sockaddr_in6 *sin6;
6352 
6353 				sin6 = (struct sockaddr_in6 *)sa;
6354 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6355 					/* Must be non-mapped for connectx */
6356 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6357 					*error = EINVAL;
6358 					*bad_addr = 1;
6359 					return (NULL);
6360 				}
6361 				if (sa->sa_len != incr) {
6362 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6363 					*error = EINVAL;
6364 					*bad_addr = 1;
6365 					return (NULL);
6366 				}
6367 				(*num_v6) += 1;
6368 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6369 				break;
6370 			}
6371 #endif
6372 		default:
6373 			*totaddr = i;
6374 			/* we are done */
6375 			break;
6376 		}
6377 		if (i == *totaddr) {
6378 			break;
6379 		}
6380 		SCTP_INP_INCR_REF(inp);
6381 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6382 		if (stcb != NULL) {
6383 			/* Already have, or am bringing up, an association */
6384 			return (stcb);
6385 		} else {
6386 			SCTP_INP_DECR_REF(inp);
6387 		}
6388 		if ((at + incr) > limit) {
6389 			*totaddr = i;
6390 			break;
6391 		}
6392 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6393 	}
6394 	return ((struct sctp_tcb *)NULL);
6395 }
6396 
6397 /*
6398  * sctp_bindx(ADD) for one address.
6399  * assumes all arguments are valid/checked by caller.
6400  */
6401 void
6402 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6403     struct sockaddr *sa, sctp_assoc_t assoc_id,
6404     uint32_t vrf_id, int *error, void *p)
6405 {
6406 	struct sockaddr *addr_touse;
6407 
6408 #if defined(INET) && defined(INET6)
6409 	struct sockaddr_in sin;
6410 
6411 #endif
6412 
6413 	/* see if we're bound all already! */
6414 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6415 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6416 		*error = EINVAL;
6417 		return;
6418 	}
6419 	addr_touse = sa;
6420 #ifdef INET6
6421 	if (sa->sa_family == AF_INET6) {
6422 #ifdef INET
6423 		struct sockaddr_in6 *sin6;
6424 
6425 #endif
6426 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6427 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6428 			*error = EINVAL;
6429 			return;
6430 		}
6431 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6432 			/* can only bind v6 on PF_INET6 sockets */
6433 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6434 			*error = EINVAL;
6435 			return;
6436 		}
6437 #ifdef INET
6438 		sin6 = (struct sockaddr_in6 *)addr_touse;
6439 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6440 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6441 			    SCTP_IPV6_V6ONLY(inp)) {
6442 				/* can't bind v4-mapped on PF_INET sockets */
6443 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6444 				*error = EINVAL;
6445 				return;
6446 			}
6447 			in6_sin6_2_sin(&sin, sin6);
6448 			addr_touse = (struct sockaddr *)&sin;
6449 		}
6450 #endif
6451 	}
6452 #endif
6453 #ifdef INET
6454 	if (sa->sa_family == AF_INET) {
6455 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6456 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6457 			*error = EINVAL;
6458 			return;
6459 		}
6460 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6461 		    SCTP_IPV6_V6ONLY(inp)) {
6462 			/* can't bind v4 on PF_INET sockets */
6463 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6464 			*error = EINVAL;
6465 			return;
6466 		}
6467 	}
6468 #endif
6469 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6470 		if (p == NULL) {
6471 			/* Can't get proc for Net/Open BSD */
6472 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6473 			*error = EINVAL;
6474 			return;
6475 		}
6476 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6477 		return;
6478 	}
6479 	/*
6480 	 * No locks required here since bind and mgmt_ep_sa all do their own
6481 	 * locking. If we do something for the FIX: below we may need to
6482 	 * lock in that case.
6483 	 */
6484 	if (assoc_id == 0) {
6485 		/* add the address */
6486 		struct sctp_inpcb *lep;
6487 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6488 
6489 		/* validate the incoming port */
6490 		if ((lsin->sin_port != 0) &&
6491 		    (lsin->sin_port != inp->sctp_lport)) {
6492 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6493 			*error = EINVAL;
6494 			return;
6495 		} else {
6496 			/* user specified 0 port, set it to existing port */
6497 			lsin->sin_port = inp->sctp_lport;
6498 		}
6499 
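		/*
		 * Look for another endpoint already bound to this address and
		 * port.  If it is us we are done; if it is someone else the
		 * address is in use; otherwise add the address to this
		 * endpoint via sctp_addr_mgmt_ep_sa().
		 */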
6500 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6501 		if (lep != NULL) {
6502 			/*
6503 			 * We must decrement the refcount since we have the
6504 			 * ep already and are binding. No remove going on
6505 			 * here.
6506 			 */
6507 			SCTP_INP_DECR_REF(lep);
6508 		}
6509 		if (lep == inp) {
6510 			/* already bound to it.. ok */
6511 			return;
6512 		} else if (lep == NULL) {
6513 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6514 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6515 			    SCTP_ADD_IP_ADDRESS,
6516 			    vrf_id, NULL);
6517 		} else {
6518 			*error = EADDRINUSE;
6519 		}
6520 		if (*error)
6521 			return;
6522 	} else {
6523 		/*
6524 		 * FIX: decide whether we allow assoc based bindx
6525 		 */
6526 	}
6527 }
6528 
6529 /*
6530  * sctp_bindx(DELETE) for one address.
6531  * assumes all arguments are valid/checked by caller.
6532  */
6533 void
6534 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6535     struct sockaddr *sa, sctp_assoc_t assoc_id,
6536     uint32_t vrf_id, int *error)
6537 {
6538 	struct sockaddr *addr_touse;
6539 
6540 #if defined(INET) && defined(INET6)
6541 	struct sockaddr_in sin;
6542 
6543 #endif
6544 
6545 	/* see if we're bound all already! */
6546 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6547 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6548 		*error = EINVAL;
6549 		return;
6550 	}
6551 	addr_touse = sa;
6552 #ifdef INET6
6553 	if (sa->sa_family == AF_INET6) {
6554 #ifdef INET
6555 		struct sockaddr_in6 *sin6;
6556 
6557 #endif
6558 
6559 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6560 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6561 			*error = EINVAL;
6562 			return;
6563 		}
6564 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6565 			/* can only bind v6 on PF_INET6 sockets */
6566 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6567 			*error = EINVAL;
6568 			return;
6569 		}
6570 #ifdef INET
6571 		sin6 = (struct sockaddr_in6 *)addr_touse;
6572 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6573 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6574 			    SCTP_IPV6_V6ONLY(inp)) {
6575 				/* can't bind mapped-v4 on PF_INET sockets */
6576 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6577 				*error = EINVAL;
6578 				return;
6579 			}
6580 			in6_sin6_2_sin(&sin, sin6);
6581 			addr_touse = (struct sockaddr *)&sin;
6582 		}
6583 #endif
6584 	}
6585 #endif
6586 #ifdef INET
6587 	if (sa->sa_family == AF_INET) {
6588 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6589 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6590 			*error = EINVAL;
6591 			return;
6592 		}
6593 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6594 		    SCTP_IPV6_V6ONLY(inp)) {
6595 			/* can't bind v4 on PF_INET sockets */
6596 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6597 			*error = EINVAL;
6598 			return;
6599 		}
6600 	}
6601 #endif
6602 	/*
6603 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6604 	 * below is ever changed we may need to lock before calling
6605 	 * association level binding.
6606 	 */
6607 	if (assoc_id == 0) {
6608 		/* delete the address */
6609 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6610 		    SCTP_DEL_IP_ADDRESS,
6611 		    vrf_id, NULL);
6612 	} else {
6613 		/*
6614 		 * FIX: decide whether we allow assoc based bindx
6615 		 */
6616 	}
6617 }
6618 
6619 /*
6620  * returns the valid local address count for an assoc, taking into account
6621  * all scoping rules
6622  */
6623 int
6624 sctp_local_addr_count(struct sctp_tcb *stcb)
6625 {
6626 	int loopback_scope;
6627 
6628 #if defined(INET)
6629 	int ipv4_local_scope, ipv4_addr_legal;
6630 
6631 #endif
6632 #if defined (INET6)
6633 	int local_scope, site_scope, ipv6_addr_legal;
6634 
6635 #endif
6636 	struct sctp_vrf *vrf;
6637 	struct sctp_ifn *sctp_ifn;
6638 	struct sctp_ifa *sctp_ifa;
6639 	int count = 0;
6640 
6641 	/* Turn on all the appropriate scopes */
6642 	loopback_scope = stcb->asoc.scope.loopback_scope;
6643 #if defined(INET)
6644 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6645 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6646 #endif
6647 #if defined(INET6)
6648 	local_scope = stcb->asoc.scope.local_scope;
6649 	site_scope = stcb->asoc.scope.site_scope;
6650 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6651 #endif
6652 	SCTP_IPI_ADDR_RLOCK();
6653 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6654 	if (vrf == NULL) {
6655 		/* no vrf, no addresses */
6656 		SCTP_IPI_ADDR_RUNLOCK();
6657 		return (0);
6658 	}
6659 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6660 		/*
6661 		 * bound all case: go through all ifns on the vrf
6662 		 */
6663 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6664 			if ((loopback_scope == 0) &&
6665 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6666 				continue;
6667 			}
6668 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6669 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6670 					continue;
6671 				switch (sctp_ifa->address.sa.sa_family) {
6672 #ifdef INET
6673 				case AF_INET:
6674 					if (ipv4_addr_legal) {
6675 						struct sockaddr_in *sin;
6676 
6677 						sin = &sctp_ifa->address.sin;
6678 						if (sin->sin_addr.s_addr == 0) {
6679 							/*
6680 							 * skip unspecified
6681 							 * addrs
6682 							 */
6683 							continue;
6684 						}
6685 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6686 						    &sin->sin_addr) != 0) {
6687 							continue;
6688 						}
6689 						if ((ipv4_local_scope == 0) &&
6690 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6691 							continue;
6692 						}
6693 						/* count this one */
6694 						count++;
6695 					} else {
6696 						continue;
6697 					}
6698 					break;
6699 #endif
6700 #ifdef INET6
6701 				case AF_INET6:
6702 					if (ipv6_addr_legal) {
6703 						struct sockaddr_in6 *sin6;
6704 
6705 						sin6 = &sctp_ifa->address.sin6;
6706 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6707 							continue;
6708 						}
6709 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6710 						    &sin6->sin6_addr) != 0) {
6711 							continue;
6712 						}
6713 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6714 							if (local_scope == 0)
6715 								continue;
6716 							if (sin6->sin6_scope_id == 0) {
6717 								if (sa6_recoverscope(sin6) != 0)
6718 									/* bad link local address */
6732 									continue;
6733 							}
6734 						}
6735 						if ((site_scope == 0) &&
6736 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6737 							continue;
6738 						}
6739 						/* count this one */
6740 						count++;
6741 					}
6742 					break;
6743 #endif
6744 				default:
6745 					/* TSNH */
6746 					break;
6747 				}
6748 			}
6749 		}
6750 	} else {
6751 		/*
6752 		 * subset bound case
6753 		 */
6754 		struct sctp_laddr *laddr;
6755 
6756 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6757 		    sctp_nxt_addr) {
6758 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6759 				continue;
6760 			}
6761 			/* count this one */
6762 			count++;
6763 		}
6764 	}
6765 	SCTP_IPI_ADDR_RUNLOCK();
6766 	return (count);
6767 }
6768 
6769 #if defined(SCTP_LOCAL_TRACE_BUF)
6770 
6771 void
6772 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6773 {
6774 	uint32_t saveindex, newindex;
6775 
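	/*
	 * Claim the next slot in the trace ring with a lock-free CAS loop;
	 * when the index reaches SCTP_MAX_LOGGING_SIZE it wraps back to
	 * slot 0.
	 */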
6776 	do {
6777 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6778 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6779 			newindex = 1;
6780 		} else {
6781 			newindex = saveindex + 1;
6782 		}
6783 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6784 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6785 		saveindex = 0;
6786 	}
6787 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6788 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6789 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6790 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6791 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6792 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6793 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6794 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6795 }
6796 
6797 #endif
6798 static void
6799 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6800     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6801 {
6802 	struct ip *iph;
6803 
6804 #ifdef INET6
6805 	struct ip6_hdr *ip6;
6806 
6807 #endif
6808 	struct mbuf *sp, *last;
6809 	struct udphdr *uhdr;
6810 	uint16_t port;
6811 
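	/*
	 * Tunneling callback registered via udp_set_kernel_tunneling():
	 * strip the UDP header from the encapsulated packet, fix up the
	 * IP/IPv6 payload length, and feed the result into the normal SCTP
	 * input path along with the UDP source port.
	 */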
6812 	if ((m->m_flags & M_PKTHDR) == 0) {
6813 		/* Can't handle one that is not a pkt hdr */
6814 		goto out;
6815 	}
6816 	/* Pull the src port */
6817 	iph = mtod(m, struct ip *);
6818 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6819 	port = uhdr->uh_sport;
6820 	/*
6821 	 * Split out the mbuf chain. Leave the IP header in m, place the
6822 	 * rest in the sp.
6823 	 */
6824 	sp = m_split(m, off, M_NOWAIT);
6825 	if (sp == NULL) {
6826 		/* Gak, drop packet, we can't do a split */
6827 		goto out;
6828 	}
6829 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6830 		/* Gak, packet can't have an SCTP header in it - too small */
6831 		m_freem(sp);
6832 		goto out;
6833 	}
6834 	/* Now pull up the UDP header and SCTP header together */
6835 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6836 	if (sp == NULL) {
6837 		/* Gak pullup failed */
6838 		goto out;
6839 	}
6840 	/* Trim out the UDP header */
6841 	m_adj(sp, sizeof(struct udphdr));
6842 
6843 	/* Now reconstruct the mbuf chain */
6844 	for (last = m; last->m_next; last = last->m_next);
6845 	last->m_next = sp;
6846 	m->m_pkthdr.len += sp->m_pkthdr.len;
6847 	/*
6848 	 * The CSUM_DATA_VALID flag indicates that the HW checked the UDP
6849 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
6850 	 * CSUM_SCTP_VALID this would imply that the HW also verified the
6851 	 * SCTP checksum. Therefore, clear the bit.
6852 	 */
6853 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6854 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6855 	    m->m_pkthdr.len,
6856 	    if_name(m->m_pkthdr.rcvif),
6857 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6858 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
6859 	iph = mtod(m, struct ip *);
6860 	switch (iph->ip_v) {
6861 #ifdef INET
6862 	case IPVERSION:
6863 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6864 		sctp_input_with_port(m, off, port);
6865 		break;
6866 #endif
6867 #ifdef INET6
6868 	case IPV6_VERSION >> 4:
6869 		ip6 = mtod(m, struct ip6_hdr *);
6870 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6871 		sctp6_input_with_port(&m, &off, port);
6872 		break;
6873 #endif
6874 	default:
6875 		goto out;
6876 		break;
6877 	}
6878 	return;
6879 out:
6880 	m_freem(m);
6881 }
6882 
6883 void
6884 sctp_over_udp_stop(void)
6885 {
6886 	/*
6887 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6888 	 * for writing!
6889 	 */
6890 #ifdef INET
6891 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
6892 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
6893 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
6894 	}
6895 #endif
6896 #ifdef INET6
6897 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
6898 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
6899 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
6900 	}
6901 #endif
6902 }
6903 
6904 int
6905 sctp_over_udp_start(void)
6906 {
6907 	uint16_t port;
6908 	int ret;
6909 
6910 #ifdef INET
6911 	struct sockaddr_in sin;
6912 
6913 #endif
6914 #ifdef INET6
6915 	struct sockaddr_in6 sin6;
6916 
6917 #endif
6918 	/*
6919 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6920 	 * for writing!
6921 	 */
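	/*
	 * Create a kernel UDP socket per address family, register
	 * sctp_recv_udp_tunneled_packet() as its tunneling callback, and
	 * bind it to the configured SCTP-over-UDP tunneling port.
	 */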
6922 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
6923 	if (ntohs(port) == 0) {
6924 		/* Must have a port set */
6925 		return (EINVAL);
6926 	}
6927 #ifdef INET
6928 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
6929 		/* Already running -- must stop first */
6930 		return (EALREADY);
6931 	}
6932 #endif
6933 #ifdef INET6
6934 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
6935 		/* Already running -- must stop first */
6936 		return (EALREADY);
6937 	}
6938 #endif
6939 #ifdef INET
6940 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
6941 	    SOCK_DGRAM, IPPROTO_UDP,
6942 	    curthread->td_ucred, curthread))) {
6943 		sctp_over_udp_stop();
6944 		return (ret);
6945 	}
6946 	/* Call the special UDP hook. */
6947 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
6948 	    sctp_recv_udp_tunneled_packet, NULL))) {
6949 		sctp_over_udp_stop();
6950 		return (ret);
6951 	}
6952 	/* Ok, we have a socket, bind it to the port. */
6953 	memset(&sin, 0, sizeof(struct sockaddr_in));
6954 	sin.sin_len = sizeof(struct sockaddr_in);
6955 	sin.sin_family = AF_INET;
6956 	sin.sin_port = htons(port);
6957 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
6958 	    (struct sockaddr *)&sin, curthread))) {
6959 		sctp_over_udp_stop();
6960 		return (ret);
6961 	}
6962 #endif
6963 #ifdef INET6
6964 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
6965 	    SOCK_DGRAM, IPPROTO_UDP,
6966 	    curthread->td_ucred, curthread))) {
6967 		sctp_over_udp_stop();
6968 		return (ret);
6969 	}
6970 	/* Call the special UDP hook. */
6971 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
6972 	    sctp_recv_udp_tunneled_packet, NULL))) {
6973 		sctp_over_udp_stop();
6974 		return (ret);
6975 	}
6976 	/* Ok, we have a socket, bind it to the port. */
6977 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
6978 	sin6.sin6_len = sizeof(struct sockaddr_in6);
6979 	sin6.sin6_family = AF_INET6;
6980 	sin6.sin6_port = htons(port);
6981 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
6982 	    (struct sockaddr *)&sin6, curthread))) {
6983 		sctp_over_udp_stop();
6984 		return (ret);
6985 	}
6986 #endif
6987 	return (0);
6988 }
6989