xref: /freebsd/sys/netinet/sctputil.c (revision 8ef24a0d4b28fe230e20637f56869cc4148cd2ca)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>	/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55 #ifdef INET6
56 #include <netinet/icmp6.h>
57 #endif
58 
59 
60 #ifndef KTR_SCTP
61 #define KTR_SCTP KTR_SUBSYS
62 #endif
63 
64 extern const struct sctp_cc_functions sctp_cc_functions[];
65 extern const struct sctp_ss_functions sctp_ss_functions[];
66 
67 void
68 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
69 {
70 	struct sctp_cwnd_log sctp_clog;
71 
72 	sctp_clog.x.sb.stcb = stcb;
73 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
74 	if (stcb)
75 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
76 	else
77 		sctp_clog.x.sb.stcb_sbcc = 0;
78 	sctp_clog.x.sb.incr = incr;
79 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
80 	    SCTP_LOG_EVENT_SB,
81 	    from,
82 	    sctp_clog.x.misc.log1,
83 	    sctp_clog.x.misc.log2,
84 	    sctp_clog.x.misc.log3,
85 	    sctp_clog.x.misc.log4);
86 }
87 
88 void
89 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
90 {
91 	struct sctp_cwnd_log sctp_clog;
92 
93 	sctp_clog.x.close.inp = (void *)inp;
94 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
95 	if (stcb) {
96 		sctp_clog.x.close.stcb = (void *)stcb;
97 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
98 	} else {
99 		sctp_clog.x.close.stcb = 0;
100 		sctp_clog.x.close.state = 0;
101 	}
102 	sctp_clog.x.close.loc = loc;
103 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
104 	    SCTP_LOG_EVENT_CLOSE,
105 	    0,
106 	    sctp_clog.x.misc.log1,
107 	    sctp_clog.x.misc.log2,
108 	    sctp_clog.x.misc.log3,
109 	    sctp_clog.x.misc.log4);
110 }
111 
112 void
113 rto_logging(struct sctp_nets *net, int from)
114 {
115 	struct sctp_cwnd_log sctp_clog;
116 
117 	memset(&sctp_clog, 0, sizeof(sctp_clog));
118 	sctp_clog.x.rto.net = (void *)net;
119 	sctp_clog.x.rto.rtt = net->rtt / 1000;
120 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
121 	    SCTP_LOG_EVENT_RTT,
122 	    from,
123 	    sctp_clog.x.misc.log1,
124 	    sctp_clog.x.misc.log2,
125 	    sctp_clog.x.misc.log3,
126 	    sctp_clog.x.misc.log4);
127 }
128 
129 void
130 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
131 {
132 	struct sctp_cwnd_log sctp_clog;
133 
134 	sctp_clog.x.strlog.stcb = stcb;
135 	sctp_clog.x.strlog.n_tsn = tsn;
136 	sctp_clog.x.strlog.n_sseq = sseq;
137 	sctp_clog.x.strlog.e_tsn = 0;
138 	sctp_clog.x.strlog.e_sseq = 0;
139 	sctp_clog.x.strlog.strm = stream;
140 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
141 	    SCTP_LOG_EVENT_STRM,
142 	    from,
143 	    sctp_clog.x.misc.log1,
144 	    sctp_clog.x.misc.log2,
145 	    sctp_clog.x.misc.log3,
146 	    sctp_clog.x.misc.log4);
147 }
148 
149 void
150 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
151 {
152 	struct sctp_cwnd_log sctp_clog;
153 
154 	sctp_clog.x.nagle.stcb = (void *)stcb;
155 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
156 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
157 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
158 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
159 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
160 	    SCTP_LOG_EVENT_NAGLE,
161 	    action,
162 	    sctp_clog.x.misc.log1,
163 	    sctp_clog.x.misc.log2,
164 	    sctp_clog.x.misc.log3,
165 	    sctp_clog.x.misc.log4);
166 }
167 
168 void
169 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
170 {
171 	struct sctp_cwnd_log sctp_clog;
172 
173 	sctp_clog.x.sack.cumack = cumack;
174 	sctp_clog.x.sack.oldcumack = old_cumack;
175 	sctp_clog.x.sack.tsn = tsn;
176 	sctp_clog.x.sack.numGaps = gaps;
177 	sctp_clog.x.sack.numDups = dups;
178 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
179 	    SCTP_LOG_EVENT_SACK,
180 	    from,
181 	    sctp_clog.x.misc.log1,
182 	    sctp_clog.x.misc.log2,
183 	    sctp_clog.x.misc.log3,
184 	    sctp_clog.x.misc.log4);
185 }
186 
187 void
188 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
189 {
190 	struct sctp_cwnd_log sctp_clog;
191 
192 	memset(&sctp_clog, 0, sizeof(sctp_clog));
193 	sctp_clog.x.map.base = map;
194 	sctp_clog.x.map.cum = cum;
195 	sctp_clog.x.map.high = high;
196 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
197 	    SCTP_LOG_EVENT_MAP,
198 	    from,
199 	    sctp_clog.x.misc.log1,
200 	    sctp_clog.x.misc.log2,
201 	    sctp_clog.x.misc.log3,
202 	    sctp_clog.x.misc.log4);
203 }
204 
205 void
206 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
207 {
208 	struct sctp_cwnd_log sctp_clog;
209 
210 	memset(&sctp_clog, 0, sizeof(sctp_clog));
211 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
212 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
213 	sctp_clog.x.fr.tsn = tsn;
214 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
215 	    SCTP_LOG_EVENT_FR,
216 	    from,
217 	    sctp_clog.x.misc.log1,
218 	    sctp_clog.x.misc.log2,
219 	    sctp_clog.x.misc.log3,
220 	    sctp_clog.x.misc.log4);
221 }
222 
223 #ifdef SCTP_MBUF_LOGGING
224 void
225 sctp_log_mb(struct mbuf *m, int from)
226 {
227 	struct sctp_cwnd_log sctp_clog;
228 
229 	sctp_clog.x.mb.mp = m;
230 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
231 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
232 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
233 	if (SCTP_BUF_IS_EXTENDED(m)) {
234 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
235 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
236 	} else {
237 		sctp_clog.x.mb.ext = 0;
238 		sctp_clog.x.mb.refcnt = 0;
239 	}
240 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
241 	    SCTP_LOG_EVENT_MBUF,
242 	    from,
243 	    sctp_clog.x.misc.log1,
244 	    sctp_clog.x.misc.log2,
245 	    sctp_clog.x.misc.log3,
246 	    sctp_clog.x.misc.log4);
247 }
248 
249 void
250 sctp_log_mbc(struct mbuf *m, int from)
251 {
252 	struct mbuf *mat;
253 
254 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
255 		sctp_log_mb(mat, from);
256 	}
257 }
258 
259 #endif
260 
261 void
262 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
263 {
264 	struct sctp_cwnd_log sctp_clog;
265 
266 	if (control == NULL) {
267 		SCTP_PRINTF("Gak log of NULL?\n");
268 		return;
269 	}
270 	sctp_clog.x.strlog.stcb = control->stcb;
271 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
272 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
273 	sctp_clog.x.strlog.strm = control->sinfo_stream;
274 	if (poschk != NULL) {
275 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
276 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
277 	} else {
278 		sctp_clog.x.strlog.e_tsn = 0;
279 		sctp_clog.x.strlog.e_sseq = 0;
280 	}
281 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
282 	    SCTP_LOG_EVENT_STRM,
283 	    from,
284 	    sctp_clog.x.misc.log1,
285 	    sctp_clog.x.misc.log2,
286 	    sctp_clog.x.misc.log3,
287 	    sctp_clog.x.misc.log4);
288 }
289 
290 void
291 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
292 {
293 	struct sctp_cwnd_log sctp_clog;
294 
295 	sctp_clog.x.cwnd.net = net;
296 	if (stcb->asoc.send_queue_cnt > 255)
297 		sctp_clog.x.cwnd.cnt_in_send = 255;
298 	else
299 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
300 	if (stcb->asoc.stream_queue_cnt > 255)
301 		sctp_clog.x.cwnd.cnt_in_str = 255;
302 	else
303 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
304 
305 	if (net) {
306 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
307 		sctp_clog.x.cwnd.inflight = net->flight_size;
308 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
309 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
310 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
311 	}
312 	if (SCTP_CWNDLOG_PRESEND == from) {
313 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
314 	}
315 	sctp_clog.x.cwnd.cwnd_augment = augment;
316 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
317 	    SCTP_LOG_EVENT_CWND,
318 	    from,
319 	    sctp_clog.x.misc.log1,
320 	    sctp_clog.x.misc.log2,
321 	    sctp_clog.x.misc.log3,
322 	    sctp_clog.x.misc.log4);
323 }
324 
325 void
326 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
327 {
328 	struct sctp_cwnd_log sctp_clog;
329 
330 	memset(&sctp_clog, 0, sizeof(sctp_clog));
331 	if (inp) {
332 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
333 
334 	} else {
335 		sctp_clog.x.lock.sock = (void *)NULL;
336 	}
337 	sctp_clog.x.lock.inp = (void *)inp;
338 	if (stcb) {
339 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
340 	} else {
341 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
342 	}
343 	if (inp) {
344 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
345 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
346 	} else {
347 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
348 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
349 	}
350 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
351 	if (inp && (inp->sctp_socket)) {
352 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
353 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
354 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
355 	} else {
356 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
357 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
358 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
359 	}
360 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
361 	    SCTP_LOG_LOCK_EVENT,
362 	    from,
363 	    sctp_clog.x.misc.log1,
364 	    sctp_clog.x.misc.log2,
365 	    sctp_clog.x.misc.log3,
366 	    sctp_clog.x.misc.log4);
367 }
368 
369 void
370 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
371 {
372 	struct sctp_cwnd_log sctp_clog;
373 
374 	memset(&sctp_clog, 0, sizeof(sctp_clog));
375 	sctp_clog.x.cwnd.net = net;
376 	sctp_clog.x.cwnd.cwnd_new_value = error;
377 	sctp_clog.x.cwnd.inflight = net->flight_size;
378 	sctp_clog.x.cwnd.cwnd_augment = burst;
379 	if (stcb->asoc.send_queue_cnt > 255)
380 		sctp_clog.x.cwnd.cnt_in_send = 255;
381 	else
382 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
383 	if (stcb->asoc.stream_queue_cnt > 255)
384 		sctp_clog.x.cwnd.cnt_in_str = 255;
385 	else
386 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
387 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
388 	    SCTP_LOG_EVENT_MAXBURST,
389 	    from,
390 	    sctp_clog.x.misc.log1,
391 	    sctp_clog.x.misc.log2,
392 	    sctp_clog.x.misc.log3,
393 	    sctp_clog.x.misc.log4);
394 }
395 
396 void
397 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
398 {
399 	struct sctp_cwnd_log sctp_clog;
400 
401 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
402 	sctp_clog.x.rwnd.send_size = snd_size;
403 	sctp_clog.x.rwnd.overhead = overhead;
404 	sctp_clog.x.rwnd.new_rwnd = 0;
405 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
406 	    SCTP_LOG_EVENT_RWND,
407 	    from,
408 	    sctp_clog.x.misc.log1,
409 	    sctp_clog.x.misc.log2,
410 	    sctp_clog.x.misc.log3,
411 	    sctp_clog.x.misc.log4);
412 }
413 
414 void
415 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
416 {
417 	struct sctp_cwnd_log sctp_clog;
418 
419 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
420 	sctp_clog.x.rwnd.send_size = flight_size;
421 	sctp_clog.x.rwnd.overhead = overhead;
422 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
423 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
424 	    SCTP_LOG_EVENT_RWND,
425 	    from,
426 	    sctp_clog.x.misc.log1,
427 	    sctp_clog.x.misc.log2,
428 	    sctp_clog.x.misc.log3,
429 	    sctp_clog.x.misc.log4);
430 }
431 
432 #ifdef SCTP_MBCNT_LOGGING
433 static void
434 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
435 {
436 	struct sctp_cwnd_log sctp_clog;
437 
438 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
439 	sctp_clog.x.mbcnt.size_change = book;
440 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
441 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
442 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
443 	    SCTP_LOG_EVENT_MBCNT,
444 	    from,
445 	    sctp_clog.x.misc.log1,
446 	    sctp_clog.x.misc.log2,
447 	    sctp_clog.x.misc.log3,
448 	    sctp_clog.x.misc.log4);
449 }
450 
451 #endif
452 
453 void
454 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
455 {
456 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
457 	    SCTP_LOG_MISC_EVENT,
458 	    from,
459 	    a, b, c, d);
460 }
461 
462 void
463 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
464 {
465 	struct sctp_cwnd_log sctp_clog;
466 
467 	sctp_clog.x.wake.stcb = (void *)stcb;
468 	sctp_clog.x.wake.wake_cnt = wake_cnt;
469 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
470 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
471 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
472 
473 	if (stcb->asoc.stream_queue_cnt < 0xff)
474 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
475 	else
476 		sctp_clog.x.wake.stream_qcnt = 0xff;
477 
478 	if (stcb->asoc.chunks_on_out_queue < 0xff)
479 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
480 	else
481 		sctp_clog.x.wake.chunks_on_oque = 0xff;
482 
483 	sctp_clog.x.wake.sctpflags = 0;
484 	/* set in the deferred mode stuff */
485 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
486 		sctp_clog.x.wake.sctpflags |= 1;
487 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
488 		sctp_clog.x.wake.sctpflags |= 2;
489 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
490 		sctp_clog.x.wake.sctpflags |= 4;
491 	/* what about the sb */
492 	if (stcb->sctp_socket) {
493 		struct socket *so = stcb->sctp_socket;
494 
495 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
496 	} else {
497 		sctp_clog.x.wake.sbflags = 0xff;
498 	}
499 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
500 	    SCTP_LOG_EVENT_WAKE,
501 	    from,
502 	    sctp_clog.x.misc.log1,
503 	    sctp_clog.x.misc.log2,
504 	    sctp_clog.x.misc.log3,
505 	    sctp_clog.x.misc.log4);
506 }
507 
508 void
509 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
510 {
511 	struct sctp_cwnd_log sctp_clog;
512 
513 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
514 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
515 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
516 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
517 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
518 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
519 	sctp_clog.x.blk.sndlen = (uint32_t) sendlen;
520 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
521 	    SCTP_LOG_EVENT_BLOCK,
522 	    from,
523 	    sctp_clog.x.misc.log1,
524 	    sctp_clog.x.misc.log2,
525 	    sctp_clog.x.misc.log3,
526 	    sctp_clog.x.misc.log4);
527 }
528 
529 int
530 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
531 {
532 	/* May need to fix this if ktrdump does not work */
533 	return (0);
534 }
535 
536 #ifdef SCTP_AUDITING_ENABLED
537 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
538 static int sctp_audit_indx = 0;
539 
540 static
541 void
542 sctp_print_audit_report(void)
543 {
544 	int i;
545 	int cnt;
546 
547 	cnt = 0;
548 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
549 		if ((sctp_audit_data[i][0] == 0xe0) &&
550 		    (sctp_audit_data[i][1] == 0x01)) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if (sctp_audit_data[i][0] == 0xf0) {
554 			cnt = 0;
555 			SCTP_PRINTF("\n");
556 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
557 		    (sctp_audit_data[i][1] == 0x01)) {
558 			SCTP_PRINTF("\n");
559 			cnt = 0;
560 		}
561 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
562 		    (uint32_t) sctp_audit_data[i][1]);
563 		cnt++;
564 		if ((cnt % 14) == 0)
565 			SCTP_PRINTF("\n");
566 	}
567 	for (i = 0; i < sctp_audit_indx; i++) {
568 		if ((sctp_audit_data[i][0] == 0xe0) &&
569 		    (sctp_audit_data[i][1] == 0x01)) {
570 			cnt = 0;
571 			SCTP_PRINTF("\n");
572 		} else if (sctp_audit_data[i][0] == 0xf0) {
573 			cnt = 0;
574 			SCTP_PRINTF("\n");
575 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
576 		    (sctp_audit_data[i][1] == 0x01)) {
577 			SCTP_PRINTF("\n");
578 			cnt = 0;
579 		}
580 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
581 		    (uint32_t) sctp_audit_data[i][1]);
582 		cnt++;
583 		if ((cnt % 14) == 0)
584 			SCTP_PRINTF("\n");
585 	}
586 	SCTP_PRINTF("\n");
587 }
588 
589 void
590 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
591     struct sctp_nets *net)
592 {
593 	int resend_cnt, tot_out, rep, tot_book_cnt;
594 	struct sctp_nets *lnet;
595 	struct sctp_tmit_chunk *chk;
596 
597 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
598 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
599 	sctp_audit_indx++;
600 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
601 		sctp_audit_indx = 0;
602 	}
603 	if (inp == NULL) {
604 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
605 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
606 		sctp_audit_indx++;
607 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
608 			sctp_audit_indx = 0;
609 		}
610 		return;
611 	}
612 	if (stcb == NULL) {
613 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
614 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
615 		sctp_audit_indx++;
616 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
617 			sctp_audit_indx = 0;
618 		}
619 		return;
620 	}
621 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
622 	sctp_audit_data[sctp_audit_indx][1] =
623 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
624 	sctp_audit_indx++;
625 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
626 		sctp_audit_indx = 0;
627 	}
628 	rep = 0;
629 	tot_book_cnt = 0;
630 	resend_cnt = tot_out = 0;
631 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
632 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
633 			resend_cnt++;
634 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
635 			tot_out += chk->book_size;
636 			tot_book_cnt++;
637 		}
638 	}
639 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
640 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
641 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
642 		sctp_audit_indx++;
643 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
644 			sctp_audit_indx = 0;
645 		}
646 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
647 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
648 		rep = 1;
649 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
650 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
651 		sctp_audit_data[sctp_audit_indx][1] =
652 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
653 		sctp_audit_indx++;
654 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
655 			sctp_audit_indx = 0;
656 		}
657 	}
658 	if (tot_out != stcb->asoc.total_flight) {
659 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
660 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
661 		sctp_audit_indx++;
662 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
663 			sctp_audit_indx = 0;
664 		}
665 		rep = 1;
666 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
667 		    (int)stcb->asoc.total_flight);
668 		stcb->asoc.total_flight = tot_out;
669 	}
670 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
671 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
672 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
673 		sctp_audit_indx++;
674 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
675 			sctp_audit_indx = 0;
676 		}
677 		rep = 1;
678 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
679 
680 		stcb->asoc.total_flight_count = tot_book_cnt;
681 	}
682 	tot_out = 0;
683 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
684 		tot_out += lnet->flight_size;
685 	}
686 	if (tot_out != stcb->asoc.total_flight) {
687 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
688 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
689 		sctp_audit_indx++;
690 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
691 			sctp_audit_indx = 0;
692 		}
693 		rep = 1;
694 		SCTP_PRINTF("real flight:%d net total was %d\n",
695 		    stcb->asoc.total_flight, tot_out);
696 		/* now corrective action */
697 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
698 
699 			tot_out = 0;
700 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
701 				if ((chk->whoTo == lnet) &&
702 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
703 					tot_out += chk->book_size;
704 				}
705 			}
706 			if (lnet->flight_size != tot_out) {
707 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
708 				    (void *)lnet, lnet->flight_size,
709 				    tot_out);
710 				lnet->flight_size = tot_out;
711 			}
712 		}
713 	}
714 	if (rep) {
715 		sctp_print_audit_report();
716 	}
717 }
718 
719 void
720 sctp_audit_log(uint8_t ev, uint8_t fd)
721 {
722 
723 	sctp_audit_data[sctp_audit_indx][0] = ev;
724 	sctp_audit_data[sctp_audit_indx][1] = fd;
725 	sctp_audit_indx++;
726 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
727 		sctp_audit_indx = 0;
728 	}
729 }
730 
731 #endif
732 
733 /*
734  * sctp_stop_timers_for_shutdown() should be called
735  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
736  * state to make sure that all timers are stopped.
737  */
738 void
739 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
740 {
741 	struct sctp_association *asoc;
742 	struct sctp_nets *net;
743 
744 	asoc = &stcb->asoc;
745 
746 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
747 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
748 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
749 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
750 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
751 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
752 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
753 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
754 	}
755 }
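
/*
 * Call-site sketch (illustrative only, not compiled): callers first move the
 * association into one of the shutdown states and then quiesce the timers,
 * e.g.
 *
 *	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_SHUTDOWN_SENT);
 *	sctp_stop_timers_for_shutdown(stcb);
 */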
756 
757 /*
758  * A list of sizes based on typical MTUs, used only if the next hop does
759  * not return a size.
760  */
761 static uint32_t sctp_mtu_sizes[] = {
762 	68,
763 	296,
764 	508,
765 	512,
766 	544,
767 	576,
768 	1006,
769 	1492,
770 	1500,
771 	1536,
772 	2002,
773 	2048,
774 	4352,
775 	4464,
776 	8166,
777 	17914,
778 	32000,
779 	65535
780 };
781 
782 /*
783  * Return the largest MTU in sctp_mtu_sizes smaller than val. If there is
784  * no smaller entry, just return val.
785  */
786 uint32_t
787 sctp_get_prev_mtu(uint32_t val)
788 {
789 	uint32_t i;
790 
791 	if (val <= sctp_mtu_sizes[0]) {
792 		return (val);
793 	}
794 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
795 		if (val <= sctp_mtu_sizes[i]) {
796 			break;
797 		}
798 	}
799 	return (sctp_mtu_sizes[i - 1]);
800 }
801 
802 /*
803  * Return the smallest MTU in sctp_mtu_sizes larger than val. If there is
804  * no larger entry, just return val.
805  */
806 uint32_t
807 sctp_get_next_mtu(uint32_t val)
808 {
809 	/* select another MTU that is just bigger than this one */
810 	uint32_t i;
811 
812 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
813 		if (val < sctp_mtu_sizes[i]) {
814 			return (sctp_mtu_sizes[i]);
815 		}
816 	}
817 	return (val);
818 }
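
/*
 * Worked example (illustrative, based only on the sctp_mtu_sizes table
 * above): sctp_get_prev_mtu(1400) returns 1006 and sctp_get_prev_mtu(1500)
 * returns 1492 (the largest entries strictly smaller than the argument),
 * while sctp_get_next_mtu(1006) returns 1492. Values with no smaller entry
 * (e.g. 68) or no larger entry (e.g. 65535) come back unchanged, so a
 * sketch of stepping the path MTU down when the next hop reports no size is
 * simply:
 *
 *	net->mtu = sctp_get_prev_mtu(net->mtu);
 */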
819 
820 void
821 sctp_fill_random_store(struct sctp_pcb *m)
822 {
823 	/*
824 	 * Here we use MD5/SHA-1 to hash our good random numbers together
825 	 * with our counter. The result becomes the new set of good random
826 	 * numbers that we hand out. Note that we do no locking to protect
827 	 * this. That is OK, since concurrent callers only mix more entropy
828 	 * into the random store, which is what we want. There is a danger
829 	 * that two callers will use the same random numbers, but that's OK
830 	 * too since that is random as well :->
831 	 */
832 	m->store_at = 0;
833 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
834 	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
835 	    sizeof(m->random_counter), (uint8_t *) m->random_store);
836 	m->random_counter++;
837 }
838 
839 uint32_t
840 sctp_select_initial_TSN(struct sctp_pcb *inp)
841 {
842 	/*
843 	 * A true implementation should use a random selection process to
844 	 * get the initial stream sequence number, using RFC 1750 as a good
845 	 * guideline.
846 	 */
847 	uint32_t x, *xp;
848 	uint8_t *p;
849 	int store_at, new_store;
850 
851 	if (inp->initial_sequence_debug != 0) {
852 		uint32_t ret;
853 
854 		ret = inp->initial_sequence_debug;
855 		inp->initial_sequence_debug++;
856 		return (ret);
857 	}
858 retry:
859 	store_at = inp->store_at;
860 	new_store = store_at + sizeof(uint32_t);
861 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
862 		new_store = 0;
863 	}
864 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
865 		goto retry;
866 	}
867 	if (new_store == 0) {
868 		/* Refill the random store */
869 		sctp_fill_random_store(inp);
870 	}
871 	p = &inp->random_store[store_at];
872 	xp = (uint32_t *) p;
873 	x = *xp;
874 	return (x);
875 }
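
/*
 * Layout sketch (illustrative): random_store holds SCTP_SIGNATURE_SIZE bytes
 * that are consumed four bytes at a time; store_at is advanced lock-free
 * with atomic_cmpset_int() and wrapped back to 0 near the end of the buffer,
 * at which point sctp_fill_random_store() rehashes a fresh batch. A
 * hypothetical consumer just calls
 *
 *	uint32_t tsn = sctp_select_initial_TSN(&inp->sctp_ep);
 *
 * and never sees the refill explicitly.
 */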
876 
877 uint32_t
878 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
879 {
880 	uint32_t x;
881 	struct timeval now;
882 
883 	if (check) {
884 		(void)SCTP_GETTIME_TIMEVAL(&now);
885 	}
886 	for (;;) {
887 		x = sctp_select_initial_TSN(&inp->sctp_ep);
888 		if (x == 0) {
889 			/* we never use 0 */
890 			continue;
891 		}
892 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
893 			break;
894 		}
895 	}
896 	return (x);
897 }
898 
899 int32_t
900 sctp_map_assoc_state(int kernel_state)
901 {
902 	int32_t user_state;
903 
904 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
905 		user_state = SCTP_CLOSED;
906 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
907 		user_state = SCTP_SHUTDOWN_PENDING;
908 	} else {
909 		switch (kernel_state & SCTP_STATE_MASK) {
910 		case SCTP_STATE_EMPTY:
911 			user_state = SCTP_CLOSED;
912 			break;
913 		case SCTP_STATE_INUSE:
914 			user_state = SCTP_CLOSED;
915 			break;
916 		case SCTP_STATE_COOKIE_WAIT:
917 			user_state = SCTP_COOKIE_WAIT;
918 			break;
919 		case SCTP_STATE_COOKIE_ECHOED:
920 			user_state = SCTP_COOKIE_ECHOED;
921 			break;
922 		case SCTP_STATE_OPEN:
923 			user_state = SCTP_ESTABLISHED;
924 			break;
925 		case SCTP_STATE_SHUTDOWN_SENT:
926 			user_state = SCTP_SHUTDOWN_SENT;
927 			break;
928 		case SCTP_STATE_SHUTDOWN_RECEIVED:
929 			user_state = SCTP_SHUTDOWN_RECEIVED;
930 			break;
931 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
932 			user_state = SCTP_SHUTDOWN_ACK_SENT;
933 			break;
934 		default:
935 			user_state = SCTP_CLOSED;
936 			break;
937 		}
938 	}
939 	return (user_state);
940 }
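
/*
 * Mapping examples (illustrative, derived from the checks above): a kernel
 * state of (SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING) maps to
 * SCTP_SHUTDOWN_PENDING, anything carrying SCTP_STATE_WAS_ABORTED maps to
 * SCTP_CLOSED regardless of the base state, and a plain SCTP_STATE_OPEN
 * maps to SCTP_ESTABLISHED.
 */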
941 
942 int
943 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
944     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
945 {
946 	struct sctp_association *asoc;
947 
948 	/*
949 	 * Anything set to zero is taken care of by the allocation routine's
950 	 * bzero
951 	 */
952 
953 	/*
954 	 * Up front, select what scoping to apply to the addresses I tell my
955 	 * peer. Not sure what to do with these right now; we will need to
956 	 * come up with a way to set them. We may need to pass them through
957 	 * from the caller in the sctp_aloc_assoc() function.
958 	 */
959 	int i;
960 
961 #if defined(SCTP_DETAILED_STR_STATS)
962 	int j;
963 
964 #endif
965 
966 	asoc = &stcb->asoc;
967 	/* init all variables to a known value. */
968 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
969 	asoc->max_burst = inp->sctp_ep.max_burst;
970 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
971 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
972 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
973 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
974 	asoc->ecn_supported = inp->ecn_supported;
975 	asoc->prsctp_supported = inp->prsctp_supported;
976 	asoc->idata_supported = inp->idata_supported;
977 	asoc->auth_supported = inp->auth_supported;
978 	asoc->asconf_supported = inp->asconf_supported;
979 	asoc->reconfig_supported = inp->reconfig_supported;
980 	asoc->nrsack_supported = inp->nrsack_supported;
981 	asoc->pktdrop_supported = inp->pktdrop_supported;
982 	asoc->idata_supported = inp->idata_supported;
983 	asoc->sctp_cmt_pf = (uint8_t) 0;
984 	asoc->sctp_frag_point = inp->sctp_frag_point;
985 	asoc->sctp_features = inp->sctp_features;
986 	asoc->default_dscp = inp->sctp_ep.default_dscp;
987 	asoc->max_cwnd = inp->max_cwnd;
988 #ifdef INET6
989 	if (inp->sctp_ep.default_flowlabel) {
990 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
991 	} else {
992 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
993 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
994 			asoc->default_flowlabel &= 0x000fffff;
995 			asoc->default_flowlabel |= 0x80000000;
996 		} else {
997 			asoc->default_flowlabel = 0;
998 		}
999 	}
1000 #endif
1001 	asoc->sb_send_resv = 0;
1002 	if (override_tag) {
1003 		asoc->my_vtag = override_tag;
1004 	} else {
1005 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1006 	}
1007 	/* Get the nonce tags */
1008 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1009 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1010 	asoc->vrf_id = vrf_id;
1011 
1012 #ifdef SCTP_ASOCLOG_OF_TSNS
1013 	asoc->tsn_in_at = 0;
1014 	asoc->tsn_out_at = 0;
1015 	asoc->tsn_in_wrapped = 0;
1016 	asoc->tsn_out_wrapped = 0;
1017 	asoc->cumack_log_at = 0;
1018 	asoc->cumack_log_atsnt = 0;
1019 #endif
1020 #ifdef SCTP_FS_SPEC_LOG
1021 	asoc->fs_index = 0;
1022 #endif
1023 	asoc->refcnt = 0;
1024 	asoc->assoc_up_sent = 0;
1025 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1026 	    sctp_select_initial_TSN(&inp->sctp_ep);
1027 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1028 	/* we are optimistic here */
1029 	asoc->peer_supports_nat = 0;
1030 	asoc->sent_queue_retran_cnt = 0;
1031 
1032 	/* for CMT */
1033 	asoc->last_net_cmt_send_started = NULL;
1034 
1035 	/* This will need to be adjusted */
1036 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1037 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1038 	asoc->asconf_seq_in = asoc->last_acked_seq;
1039 
1040 	/* here we are different, we hold the next one we expect */
1041 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1042 
1043 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1044 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1045 
1046 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1047 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1048 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1049 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1050 	asoc->free_chunk_cnt = 0;
1051 
1052 	asoc->iam_blocking = 0;
1053 	asoc->context = inp->sctp_context;
1054 	asoc->local_strreset_support = inp->local_strreset_support;
1055 	asoc->def_send = inp->def_send;
1056 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1057 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1058 	asoc->pr_sctp_cnt = 0;
1059 	asoc->total_output_queue_size = 0;
1060 
1061 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1062 		asoc->scope.ipv6_addr_legal = 1;
1063 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1064 			asoc->scope.ipv4_addr_legal = 1;
1065 		} else {
1066 			asoc->scope.ipv4_addr_legal = 0;
1067 		}
1068 	} else {
1069 		asoc->scope.ipv6_addr_legal = 0;
1070 		asoc->scope.ipv4_addr_legal = 1;
1071 	}
1072 
1073 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1074 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1075 
1076 	asoc->smallest_mtu = inp->sctp_frag_point;
1077 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1078 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1079 
1080 	asoc->locked_on_sending = NULL;
1081 	asoc->stream_locked_on = 0;
1082 	asoc->ecn_echo_cnt_onq = 0;
1083 	asoc->stream_locked = 0;
1084 
1085 	asoc->send_sack = 1;
1086 
1087 	LIST_INIT(&asoc->sctp_restricted_addrs);
1088 
1089 	TAILQ_INIT(&asoc->nets);
1090 	TAILQ_INIT(&asoc->pending_reply_queue);
1091 	TAILQ_INIT(&asoc->asconf_ack_sent);
1092 	/* Set up to fill the HB random cache at the first HB */
1093 	asoc->hb_random_idx = 4;
1094 
1095 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1096 
1097 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1098 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1099 
1100 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1101 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1102 
1103 	/*
1104 	 * Now the stream parameters, here we allocate space for all streams
1105 	 * that we request by default.
1106 	 */
1107 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1108 	    o_strms;
1109 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1110 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1111 	    SCTP_M_STRMO);
1112 	if (asoc->strmout == NULL) {
1113 		/* big trouble, no memory */
1114 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1115 		return (ENOMEM);
1116 	}
1117 	for (i = 0; i < asoc->streamoutcnt; i++) {
1118 		/*
1119 		 * The inbound side must be set to 0xffff. Also NOTE: when we
1120 		 * get the INIT-ACK back (for the INIT sender) we MUST reduce
1121 		 * the count (streamoutcnt), but first check whether we sent on
1122 		 * any of the upper streams that were dropped (if some were).
1123 		 * Anything sent on a dropped stream must be reported to the
1124 		 * upper layer as failed to send.
1125 		 */
1126 		asoc->strmout[i].next_sequence_send = 0x0;
1127 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1128 		asoc->strmout[i].chunks_on_queues = 0;
1129 #if defined(SCTP_DETAILED_STR_STATS)
1130 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1131 			asoc->strmout[i].abandoned_sent[j] = 0;
1132 			asoc->strmout[i].abandoned_unsent[j] = 0;
1133 		}
1134 #else
1135 		asoc->strmout[i].abandoned_sent[0] = 0;
1136 		asoc->strmout[i].abandoned_unsent[0] = 0;
1137 #endif
1138 		asoc->strmout[i].stream_no = i;
1139 		asoc->strmout[i].last_msg_incomplete = 0;
1140 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1141 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1142 	}
1143 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1144 
1145 	/* Now the mapping array */
1146 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1147 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1148 	    SCTP_M_MAP);
1149 	if (asoc->mapping_array == NULL) {
1150 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1151 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1152 		return (ENOMEM);
1153 	}
1154 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1155 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1156 	    SCTP_M_MAP);
1157 	if (asoc->nr_mapping_array == NULL) {
1158 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1159 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1160 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1161 		return (ENOMEM);
1162 	}
1163 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1164 
1165 	/* Now the init of the other outqueues */
1166 	TAILQ_INIT(&asoc->free_chunks);
1167 	TAILQ_INIT(&asoc->control_send_queue);
1168 	TAILQ_INIT(&asoc->asconf_send_queue);
1169 	TAILQ_INIT(&asoc->send_queue);
1170 	TAILQ_INIT(&asoc->sent_queue);
1171 	TAILQ_INIT(&asoc->resetHead);
1172 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1173 	TAILQ_INIT(&asoc->asconf_queue);
1174 	/* authentication fields */
1175 	asoc->authinfo.random = NULL;
1176 	asoc->authinfo.active_keyid = 0;
1177 	asoc->authinfo.assoc_key = NULL;
1178 	asoc->authinfo.assoc_keyid = 0;
1179 	asoc->authinfo.recv_key = NULL;
1180 	asoc->authinfo.recv_keyid = 0;
1181 	LIST_INIT(&asoc->shared_keys);
1182 	asoc->marked_retrans = 0;
1183 	asoc->port = inp->sctp_ep.port;
1184 	asoc->timoinit = 0;
1185 	asoc->timodata = 0;
1186 	asoc->timosack = 0;
1187 	asoc->timoshutdown = 0;
1188 	asoc->timoheartbeat = 0;
1189 	asoc->timocookie = 0;
1190 	asoc->timoshutdownack = 0;
1191 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1192 	asoc->discontinuity_time = asoc->start_time;
1193 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1194 		asoc->abandoned_unsent[i] = 0;
1195 		asoc->abandoned_sent[i] = 0;
1196 	}
1197 	/*
1198 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1199 	 * freed later when the association is freed}.
1200 	 */
1201 	return (0);
1202 }
1203 
1204 void
1205 sctp_print_mapping_array(struct sctp_association *asoc)
1206 {
1207 	unsigned int i, limit;
1208 
1209 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1210 	    asoc->mapping_array_size,
1211 	    asoc->mapping_array_base_tsn,
1212 	    asoc->cumulative_tsn,
1213 	    asoc->highest_tsn_inside_map,
1214 	    asoc->highest_tsn_inside_nr_map);
1215 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1216 		if (asoc->mapping_array[limit - 1] != 0) {
1217 			break;
1218 		}
1219 	}
1220 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1221 	for (i = 0; i < limit; i++) {
1222 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1223 	}
1224 	if (limit % 16)
1225 		SCTP_PRINTF("\n");
1226 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1227 		if (asoc->nr_mapping_array[limit - 1]) {
1228 			break;
1229 		}
1230 	}
1231 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1232 	for (i = 0; i < limit; i++) {
1233 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1234 	}
1235 	if (limit % 16)
1236 		SCTP_PRINTF("\n");
1237 }
1238 
1239 int
1240 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1241 {
1242 	/* mapping array needs to grow */
1243 	uint8_t *new_array1, *new_array2;
1244 	uint32_t new_size;
1245 
1246 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1247 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1248 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1249 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1250 		/* can't get more, forget it */
1251 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1252 		if (new_array1) {
1253 			SCTP_FREE(new_array1, SCTP_M_MAP);
1254 		}
1255 		if (new_array2) {
1256 			SCTP_FREE(new_array2, SCTP_M_MAP);
1257 		}
1258 		return (-1);
1259 	}
1260 	memset(new_array1, 0, new_size);
1261 	memset(new_array2, 0, new_size);
1262 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1263 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1264 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1265 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1266 	asoc->mapping_array = new_array1;
1267 	asoc->nr_mapping_array = new_array2;
1268 	asoc->mapping_array_size = new_size;
1269 	return (0);
1270 }
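
/*
 * Sizing sketch (illustrative): needed is a count of TSNs that must fit
 * beyond the current window, so (needed + 7) / 8 rounds up to whole bytes of
 * bit map, and SCTP_MAPPING_ARRAY_INCR adds slack so the arrays are not
 * regrown on every small advance. For example, needed = 100 grows both
 * arrays by 13 + SCTP_MAPPING_ARRAY_INCR bytes.
 */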
1271 
1272 
1273 static void
1274 sctp_iterator_work(struct sctp_iterator *it)
1275 {
1276 	int iteration_count = 0;
1277 	int inp_skip = 0;
1278 	int first_in = 1;
1279 	struct sctp_inpcb *tinp;
1280 
1281 	SCTP_INP_INFO_RLOCK();
1282 	SCTP_ITERATOR_LOCK();
1283 	if (it->inp) {
1284 		SCTP_INP_RLOCK(it->inp);
1285 		SCTP_INP_DECR_REF(it->inp);
1286 	}
1287 	if (it->inp == NULL) {
1288 		/* iterator is complete */
1289 done_with_iterator:
1290 		SCTP_ITERATOR_UNLOCK();
1291 		SCTP_INP_INFO_RUNLOCK();
1292 		if (it->function_atend != NULL) {
1293 			(*it->function_atend) (it->pointer, it->val);
1294 		}
1295 		SCTP_FREE(it, SCTP_M_ITER);
1296 		return;
1297 	}
1298 select_a_new_ep:
1299 	if (first_in) {
1300 		first_in = 0;
1301 	} else {
1302 		SCTP_INP_RLOCK(it->inp);
1303 	}
1304 	while (((it->pcb_flags) &&
1305 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1306 	    ((it->pcb_features) &&
1307 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1308 		/* endpoint flags or features don't match, so keep looking */
1309 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1310 			SCTP_INP_RUNLOCK(it->inp);
1311 			goto done_with_iterator;
1312 		}
1313 		tinp = it->inp;
1314 		it->inp = LIST_NEXT(it->inp, sctp_list);
1315 		SCTP_INP_RUNLOCK(tinp);
1316 		if (it->inp == NULL) {
1317 			goto done_with_iterator;
1318 		}
1319 		SCTP_INP_RLOCK(it->inp);
1320 	}
1321 	/* now go through each assoc which is in the desired state */
1322 	if (it->done_current_ep == 0) {
1323 		if (it->function_inp != NULL)
1324 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1325 		it->done_current_ep = 1;
1326 	}
1327 	if (it->stcb == NULL) {
1328 		/* run the per instance function */
1329 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1330 	}
1331 	if ((inp_skip) || it->stcb == NULL) {
1332 		if (it->function_inp_end != NULL) {
1333 			inp_skip = (*it->function_inp_end) (it->inp,
1334 			    it->pointer,
1335 			    it->val);
1336 		}
1337 		SCTP_INP_RUNLOCK(it->inp);
1338 		goto no_stcb;
1339 	}
1340 	while (it->stcb) {
1341 		SCTP_TCB_LOCK(it->stcb);
1342 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1343 			/* not in the right state... keep looking */
1344 			SCTP_TCB_UNLOCK(it->stcb);
1345 			goto next_assoc;
1346 		}
1347 		/* see if we have hit the per-pass limit of the iterator loop */
1348 		iteration_count++;
1349 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1350 			/* Pause to let others grab the lock */
1351 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1352 			SCTP_TCB_UNLOCK(it->stcb);
1353 			SCTP_INP_INCR_REF(it->inp);
1354 			SCTP_INP_RUNLOCK(it->inp);
1355 			SCTP_ITERATOR_UNLOCK();
1356 			SCTP_INP_INFO_RUNLOCK();
1357 			SCTP_INP_INFO_RLOCK();
1358 			SCTP_ITERATOR_LOCK();
1359 			if (sctp_it_ctl.iterator_flags) {
1360 				/* We won't be staying here */
1361 				SCTP_INP_DECR_REF(it->inp);
1362 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1363 				if (sctp_it_ctl.iterator_flags &
1364 				    SCTP_ITERATOR_STOP_CUR_IT) {
1365 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1366 					goto done_with_iterator;
1367 				}
1368 				if (sctp_it_ctl.iterator_flags &
1369 				    SCTP_ITERATOR_STOP_CUR_INP) {
1370 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1371 					goto no_stcb;
1372 				}
1373 				/* If we reach here huh? */
1374 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1375 				    sctp_it_ctl.iterator_flags);
1376 				sctp_it_ctl.iterator_flags = 0;
1377 			}
1378 			SCTP_INP_RLOCK(it->inp);
1379 			SCTP_INP_DECR_REF(it->inp);
1380 			SCTP_TCB_LOCK(it->stcb);
1381 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1382 			iteration_count = 0;
1383 		}
1384 		/* run function on this one */
1385 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1386 
1387 		/*
1388 		 * we lie here, it really needs to have its own type but
1389 		 * first I must verify that this won't affect things :-0
1390 		 */
1391 		if (it->no_chunk_output == 0)
1392 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1393 
1394 		SCTP_TCB_UNLOCK(it->stcb);
1395 next_assoc:
1396 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1397 		if (it->stcb == NULL) {
1398 			/* Run last function */
1399 			if (it->function_inp_end != NULL) {
1400 				inp_skip = (*it->function_inp_end) (it->inp,
1401 				    it->pointer,
1402 				    it->val);
1403 			}
1404 		}
1405 	}
1406 	SCTP_INP_RUNLOCK(it->inp);
1407 no_stcb:
1408 	/* done with all assocs on this endpoint, move on to next endpoint */
1409 	it->done_current_ep = 0;
1410 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1411 		it->inp = NULL;
1412 	} else {
1413 		it->inp = LIST_NEXT(it->inp, sctp_list);
1414 	}
1415 	if (it->inp == NULL) {
1416 		goto done_with_iterator;
1417 	}
1418 	goto select_a_new_ep;
1419 }
1420 
1421 void
1422 sctp_iterator_worker(void)
1423 {
1424 	struct sctp_iterator *it, *nit;
1425 
1426 	/* This function is called with the WQ lock in place */
1427 
1428 	sctp_it_ctl.iterator_running = 1;
1429 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1430 		sctp_it_ctl.cur_it = it;
1431 		/* now lets work on this one */
1432 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1433 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1434 		CURVNET_SET(it->vn);
1435 		sctp_iterator_work(it);
1436 		sctp_it_ctl.cur_it = NULL;
1437 		CURVNET_RESTORE();
1438 		SCTP_IPI_ITERATOR_WQ_LOCK();
1439 		/* sa_ignore FREED_MEMORY */
1440 	}
1441 	sctp_it_ctl.iterator_running = 0;
1442 	return;
1443 }
1444 
1445 
1446 static void
1447 sctp_handle_addr_wq(void)
1448 {
1449 	/* deal with the ADDR wq from the rtsock calls */
1450 	struct sctp_laddr *wi, *nwi;
1451 	struct sctp_asconf_iterator *asc;
1452 
1453 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1454 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1455 	if (asc == NULL) {
1456 		/* Try later, no memory */
1457 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1458 		    (struct sctp_inpcb *)NULL,
1459 		    (struct sctp_tcb *)NULL,
1460 		    (struct sctp_nets *)NULL);
1461 		return;
1462 	}
1463 	LIST_INIT(&asc->list_of_work);
1464 	asc->cnt = 0;
1465 
1466 	SCTP_WQ_ADDR_LOCK();
1467 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1468 		LIST_REMOVE(wi, sctp_nxt_addr);
1469 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1470 		asc->cnt++;
1471 	}
1472 	SCTP_WQ_ADDR_UNLOCK();
1473 
1474 	if (asc->cnt == 0) {
1475 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1476 	} else {
1477 		int ret;
1478 
1479 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1480 		    sctp_asconf_iterator_stcb,
1481 		    NULL,	/* No ep end for boundall */
1482 		    SCTP_PCB_FLAGS_BOUNDALL,
1483 		    SCTP_PCB_ANY_FEATURES,
1484 		    SCTP_ASOC_ANY_STATE,
1485 		    (void *)asc, 0,
1486 		    sctp_asconf_iterator_end, NULL, 0);
1487 		if (ret) {
1488 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1489 			/*
1490 			 * Free it if we are stopping, or put it back on the
1491 			 * addr_wq.
1492 			 */
1493 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1494 				sctp_asconf_iterator_end(asc, 0);
1495 			} else {
1496 				SCTP_WQ_ADDR_LOCK();
1497 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1498 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1499 				}
1500 				SCTP_WQ_ADDR_UNLOCK();
1501 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1502 			}
1503 		}
1504 	}
1505 }
1506 
1507 void
1508 sctp_timeout_handler(void *t)
1509 {
1510 	struct sctp_inpcb *inp;
1511 	struct sctp_tcb *stcb;
1512 	struct sctp_nets *net;
1513 	struct sctp_timer *tmr;
1514 	struct mbuf *op_err;
1515 
1516 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1517 	struct socket *so;
1518 
1519 #endif
1520 	int did_output;
1521 	int type;
1522 
1523 	tmr = (struct sctp_timer *)t;
1524 	inp = (struct sctp_inpcb *)tmr->ep;
1525 	stcb = (struct sctp_tcb *)tmr->tcb;
1526 	net = (struct sctp_nets *)tmr->net;
1527 	CURVNET_SET((struct vnet *)tmr->vnet);
1528 	did_output = 1;
1529 
1530 #ifdef SCTP_AUDITING_ENABLED
1531 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1532 	sctp_auditing(3, inp, stcb, net);
1533 #endif
1534 
1535 	/* sanity checks... */
1536 	if (tmr->self != (void *)tmr) {
1537 		/*
1538 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1539 		 * (void *)tmr);
1540 		 */
1541 		CURVNET_RESTORE();
1542 		return;
1543 	}
1544 	tmr->stopped_from = 0xa001;
1545 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1546 		/*
1547 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1548 		 * tmr->type);
1549 		 */
1550 		CURVNET_RESTORE();
1551 		return;
1552 	}
1553 	tmr->stopped_from = 0xa002;
1554 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1555 		CURVNET_RESTORE();
1556 		return;
1557 	}
1558 	/* if this is an iterator timeout, get the struct and clear inp */
1559 	tmr->stopped_from = 0xa003;
1560 	if (inp) {
1561 		SCTP_INP_INCR_REF(inp);
1562 		if ((inp->sctp_socket == NULL) &&
1563 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1564 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1565 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1566 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1567 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1568 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1569 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1570 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1571 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1572 		    ) {
1573 			SCTP_INP_DECR_REF(inp);
1574 			CURVNET_RESTORE();
1575 			return;
1576 		}
1577 	}
1578 	tmr->stopped_from = 0xa004;
1579 	if (stcb) {
1580 		atomic_add_int(&stcb->asoc.refcnt, 1);
1581 		if (stcb->asoc.state == 0) {
1582 			atomic_add_int(&stcb->asoc.refcnt, -1);
1583 			if (inp) {
1584 				SCTP_INP_DECR_REF(inp);
1585 			}
1586 			CURVNET_RESTORE();
1587 			return;
1588 		}
1589 	}
1590 	type = tmr->type;
1591 	tmr->stopped_from = 0xa005;
1592 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1593 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1594 		if (inp) {
1595 			SCTP_INP_DECR_REF(inp);
1596 		}
1597 		if (stcb) {
1598 			atomic_add_int(&stcb->asoc.refcnt, -1);
1599 		}
1600 		CURVNET_RESTORE();
1601 		return;
1602 	}
1603 	tmr->stopped_from = 0xa006;
1604 
1605 	if (stcb) {
1606 		SCTP_TCB_LOCK(stcb);
1607 		atomic_add_int(&stcb->asoc.refcnt, -1);
1608 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1609 		    ((stcb->asoc.state == 0) ||
1610 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1611 			SCTP_TCB_UNLOCK(stcb);
1612 			if (inp) {
1613 				SCTP_INP_DECR_REF(inp);
1614 			}
1615 			CURVNET_RESTORE();
1616 			return;
1617 		}
1618 	}
1619 	/* record in stopped_from which timeout occurred */
1620 	tmr->stopped_from = type;
1621 
1622 	/* mark as being serviced now */
1623 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1624 		/*
1625 		 * Callout has been rescheduled.
1626 		 */
1627 		goto get_out;
1628 	}
1629 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1630 		/*
1631 		 * Not active, so no action.
1632 		 */
1633 		goto get_out;
1634 	}
1635 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1636 
1637 	/* call the handler for the appropriate timer type */
1638 	switch (type) {
1639 	case SCTP_TIMER_TYPE_ZERO_COPY:
1640 		if (inp == NULL) {
1641 			break;
1642 		}
1643 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1644 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1645 		}
1646 		break;
1647 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1648 		if (inp == NULL) {
1649 			break;
1650 		}
1651 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1652 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1653 		}
1654 		break;
1655 	case SCTP_TIMER_TYPE_ADDR_WQ:
1656 		sctp_handle_addr_wq();
1657 		break;
1658 	case SCTP_TIMER_TYPE_SEND:
1659 		if ((stcb == NULL) || (inp == NULL)) {
1660 			break;
1661 		}
1662 		SCTP_STAT_INCR(sctps_timodata);
1663 		stcb->asoc.timodata++;
1664 		stcb->asoc.num_send_timers_up--;
1665 		if (stcb->asoc.num_send_timers_up < 0) {
1666 			stcb->asoc.num_send_timers_up = 0;
1667 		}
1668 		SCTP_TCB_LOCK_ASSERT(stcb);
1669 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1670 			/* no need to unlock the tcb, it's gone */
1671 
1672 			goto out_decr;
1673 		}
1674 		SCTP_TCB_LOCK_ASSERT(stcb);
1675 #ifdef SCTP_AUDITING_ENABLED
1676 		sctp_auditing(4, inp, stcb, net);
1677 #endif
1678 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1679 		if ((stcb->asoc.num_send_timers_up == 0) &&
1680 		    (stcb->asoc.sent_queue_cnt > 0)) {
1681 			struct sctp_tmit_chunk *chk;
1682 
1683 			/*
1684 			 * safeguard. If there are chunks on the sent queue
1685 			 * somewhere but no timers running, something is
1686 			 * wrong... so we start a timer on the first chunk
1687 			 * on the sent queue, on whatever net it is sent to.
1688 			 */
1689 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1690 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1691 			    chk->whoTo);
1692 		}
1693 		break;
1694 	case SCTP_TIMER_TYPE_INIT:
1695 		if ((stcb == NULL) || (inp == NULL)) {
1696 			break;
1697 		}
1698 		SCTP_STAT_INCR(sctps_timoinit);
1699 		stcb->asoc.timoinit++;
1700 		if (sctp_t1init_timer(inp, stcb, net)) {
1701 			/* no need to unlock the tcb, it's gone */
1702 			goto out_decr;
1703 		}
1704 		/* We do output but not here */
1705 		did_output = 0;
1706 		break;
1707 	case SCTP_TIMER_TYPE_RECV:
1708 		if ((stcb == NULL) || (inp == NULL)) {
1709 			break;
1710 		}
1711 		SCTP_STAT_INCR(sctps_timosack);
1712 		stcb->asoc.timosack++;
1713 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1714 #ifdef SCTP_AUDITING_ENABLED
1715 		sctp_auditing(4, inp, stcb, net);
1716 #endif
1717 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1718 		break;
1719 	case SCTP_TIMER_TYPE_SHUTDOWN:
1720 		if ((stcb == NULL) || (inp == NULL)) {
1721 			break;
1722 		}
1723 		if (sctp_shutdown_timer(inp, stcb, net)) {
1724 			/* no need to unlock the tcb, it's gone */
1725 			goto out_decr;
1726 		}
1727 		SCTP_STAT_INCR(sctps_timoshutdown);
1728 		stcb->asoc.timoshutdown++;
1729 #ifdef SCTP_AUDITING_ENABLED
1730 		sctp_auditing(4, inp, stcb, net);
1731 #endif
1732 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1733 		break;
1734 	case SCTP_TIMER_TYPE_HEARTBEAT:
1735 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1736 			break;
1737 		}
1738 		SCTP_STAT_INCR(sctps_timoheartbeat);
1739 		stcb->asoc.timoheartbeat++;
1740 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1741 			/* no need to unlock the tcb, it's gone */
1742 			goto out_decr;
1743 		}
1744 #ifdef SCTP_AUDITING_ENABLED
1745 		sctp_auditing(4, inp, stcb, net);
1746 #endif
1747 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1748 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1749 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1750 		}
1751 		break;
1752 	case SCTP_TIMER_TYPE_COOKIE:
1753 		if ((stcb == NULL) || (inp == NULL)) {
1754 			break;
1755 		}
1756 		if (sctp_cookie_timer(inp, stcb, net)) {
1757 			/* no need to unlock the tcb, it's gone */
1758 			goto out_decr;
1759 		}
1760 		SCTP_STAT_INCR(sctps_timocookie);
1761 		stcb->asoc.timocookie++;
1762 #ifdef SCTP_AUDITING_ENABLED
1763 		sctp_auditing(4, inp, stcb, net);
1764 #endif
1765 		/*
1766 		 * We consider T3 and Cookie timer pretty much the same with
1767 		 * respect to where from in chunk_output.
1768 		 */
1769 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1770 		break;
1771 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1772 		{
1773 			struct timeval tv;
1774 			int i, secret;
1775 
1776 			if (inp == NULL) {
1777 				break;
1778 			}
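			/*
			 * Rotate the endpoint's cookie secret: record when
			 * the change happened, remember the previous slot,
			 * advance current_secret_number (wrapping at
			 * SCTP_HOW_MANY_SECRETS), refill the new slot with
			 * SCTP_NUMBER_OF_SECRETS fresh random words, and
			 * re-arm this timer for the next rotation.
			 */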
1779 			SCTP_STAT_INCR(sctps_timosecret);
1780 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1781 			SCTP_INP_WLOCK(inp);
1782 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1783 			inp->sctp_ep.last_secret_number =
1784 			    inp->sctp_ep.current_secret_number;
1785 			inp->sctp_ep.current_secret_number++;
1786 			if (inp->sctp_ep.current_secret_number >=
1787 			    SCTP_HOW_MANY_SECRETS) {
1788 				inp->sctp_ep.current_secret_number = 0;
1789 			}
1790 			secret = (int)inp->sctp_ep.current_secret_number;
1791 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1792 				inp->sctp_ep.secret_key[secret][i] =
1793 				    sctp_select_initial_TSN(&inp->sctp_ep);
1794 			}
1795 			SCTP_INP_WUNLOCK(inp);
1796 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1797 		}
1798 		did_output = 0;
1799 		break;
1800 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1801 		if ((stcb == NULL) || (inp == NULL)) {
1802 			break;
1803 		}
1804 		SCTP_STAT_INCR(sctps_timopathmtu);
1805 		sctp_pathmtu_timer(inp, stcb, net);
1806 		did_output = 0;
1807 		break;
1808 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1809 		if ((stcb == NULL) || (inp == NULL)) {
1810 			break;
1811 		}
1812 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1813 			/* no need to unlock the tcb, it's gone */
1814 			goto out_decr;
1815 		}
1816 		SCTP_STAT_INCR(sctps_timoshutdownack);
1817 		stcb->asoc.timoshutdownack++;
1818 #ifdef SCTP_AUDITING_ENABLED
1819 		sctp_auditing(4, inp, stcb, net);
1820 #endif
1821 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1822 		break;
1823 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1824 		if ((stcb == NULL) || (inp == NULL)) {
1825 			break;
1826 		}
1827 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1828 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1829 		    "Shutdown guard timer expired");
1830 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1831 		/* no need to unlock the tcb, it's gone */
1832 		goto out_decr;
1833 
1834 	case SCTP_TIMER_TYPE_STRRESET:
1835 		if ((stcb == NULL) || (inp == NULL)) {
1836 			break;
1837 		}
1838 		if (sctp_strreset_timer(inp, stcb, net)) {
1839 			/* no need to unlock the tcb, it's gone */
1840 			goto out_decr;
1841 		}
1842 		SCTP_STAT_INCR(sctps_timostrmrst);
1843 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1844 		break;
1845 	case SCTP_TIMER_TYPE_ASCONF:
1846 		if ((stcb == NULL) || (inp == NULL)) {
1847 			break;
1848 		}
1849 		if (sctp_asconf_timer(inp, stcb, net)) {
1850 			/* no need to unlock the tcb, it's gone */
1851 			goto out_decr;
1852 		}
1853 		SCTP_STAT_INCR(sctps_timoasconf);
1854 #ifdef SCTP_AUDITING_ENABLED
1855 		sctp_auditing(4, inp, stcb, net);
1856 #endif
1857 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1858 		break;
1859 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1860 		if ((stcb == NULL) || (inp == NULL)) {
1861 			break;
1862 		}
1863 		sctp_delete_prim_timer(inp, stcb, net);
1864 		SCTP_STAT_INCR(sctps_timodelprim);
1865 		break;
1866 
1867 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1868 		if ((stcb == NULL) || (inp == NULL)) {
1869 			break;
1870 		}
1871 		SCTP_STAT_INCR(sctps_timoautoclose);
1872 		sctp_autoclose_timer(inp, stcb, net);
1873 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1874 		did_output = 0;
1875 		break;
1876 	case SCTP_TIMER_TYPE_ASOCKILL:
1877 		if ((stcb == NULL) || (inp == NULL)) {
1878 			break;
1879 		}
1880 		SCTP_STAT_INCR(sctps_timoassockill);
1881 		/* Can we free it yet? */
1882 		SCTP_INP_DECR_REF(inp);
1883 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1884 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1885 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1886 		so = SCTP_INP_SO(inp);
1887 		atomic_add_int(&stcb->asoc.refcnt, 1);
1888 		SCTP_TCB_UNLOCK(stcb);
1889 		SCTP_SOCKET_LOCK(so, 1);
1890 		SCTP_TCB_LOCK(stcb);
1891 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1892 #endif
1893 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1894 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1895 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1896 		SCTP_SOCKET_UNLOCK(so, 1);
1897 #endif
1898 		/*
1899 		 * sctp_free_assoc() always unlocks (or destroys) the lock, so
1900 		 * prevent a duplicate unlock or an unlock of a freed mtx.
1901 		 */
1902 		stcb = NULL;
1903 		goto out_no_decr;
1904 	case SCTP_TIMER_TYPE_INPKILL:
1905 		SCTP_STAT_INCR(sctps_timoinpkill);
1906 		if (inp == NULL) {
1907 			break;
1908 		}
1909 		/*
1910 		 * special case, take away our increment since WE are the
1911 		 * killer
1912 		 */
1913 		SCTP_INP_DECR_REF(inp);
1914 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1915 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1916 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1917 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1918 		inp = NULL;
1919 		goto out_no_decr;
1920 	default:
1921 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1922 		    type);
1923 		break;
1924 	}
1925 #ifdef SCTP_AUDITING_ENABLED
1926 	sctp_audit_log(0xF1, (uint8_t) type);
1927 	if (inp)
1928 		sctp_auditing(5, inp, stcb, net);
1929 #endif
1930 	if ((did_output) && stcb) {
1931 		/*
1932 		 * Now we need to clean up the control chunk chain if an
1933 		 * ECNE is on it. It must be marked as UNSENT again so the
1934 		 * next call will continue to send it until we get a CWR
1935 		 * to remove it. It is, however, unlikely that we will
1936 		 * find an ECN echo on the chain.
1937 		 */
1938 		sctp_fix_ecn_echo(&stcb->asoc);
1939 	}
1940 get_out:
1941 	if (stcb) {
1942 		SCTP_TCB_UNLOCK(stcb);
1943 	}
1944 out_decr:
1945 	if (inp) {
1946 		SCTP_INP_DECR_REF(inp);
1947 	}
1948 out_no_decr:
1949 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
1950 	CURVNET_RESTORE();
1951 }
1952 
1953 void
1954 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1955     struct sctp_nets *net)
1956 {
1957 	uint32_t to_ticks;
1958 	struct sctp_timer *tmr;
1959 
1960 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1961 		return;
1962 
1963 	tmr = NULL;
1964 	if (stcb) {
1965 		SCTP_TCB_LOCK_ASSERT(stcb);
1966 	}
1967 	switch (t_type) {
1968 	case SCTP_TIMER_TYPE_ZERO_COPY:
1969 		tmr = &inp->sctp_ep.zero_copy_timer;
1970 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1971 		break;
1972 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1973 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1974 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1975 		break;
1976 	case SCTP_TIMER_TYPE_ADDR_WQ:
1977 		/* Only 1 tick away :-) */
1978 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1979 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1980 		break;
1981 	case SCTP_TIMER_TYPE_SEND:
1982 		/* Here we use the RTO timer */
1983 		{
1984 			int rto_val;
1985 
1986 			if ((stcb == NULL) || (net == NULL)) {
1987 				return;
1988 			}
1989 			tmr = &net->rxt_timer;
1990 			if (net->RTO == 0) {
1991 				rto_val = stcb->asoc.initial_rto;
1992 			} else {
1993 				rto_val = net->RTO;
1994 			}
1995 			to_ticks = MSEC_TO_TICKS(rto_val);
1996 		}
1997 		break;
1998 	case SCTP_TIMER_TYPE_INIT:
1999 		/*
2000 		 * Here we use the INIT timer default, usually about 1
2001 		 * minute.
2002 		 */
2003 		if ((stcb == NULL) || (net == NULL)) {
2004 			return;
2005 		}
2006 		tmr = &net->rxt_timer;
2007 		if (net->RTO == 0) {
2008 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2009 		} else {
2010 			to_ticks = MSEC_TO_TICKS(net->RTO);
2011 		}
2012 		break;
2013 	case SCTP_TIMER_TYPE_RECV:
2014 		/*
2015 		 * Here we use the Delayed-Ack timer value from the inp,
2016 		 * usually about 200ms.
2017 		 */
2018 		if (stcb == NULL) {
2019 			return;
2020 		}
2021 		tmr = &stcb->asoc.dack_timer;
2022 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2023 		break;
2024 	case SCTP_TIMER_TYPE_SHUTDOWN:
2025 		/* Here we use the RTO of the destination. */
2026 		if ((stcb == NULL) || (net == NULL)) {
2027 			return;
2028 		}
2029 		if (net->RTO == 0) {
2030 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2031 		} else {
2032 			to_ticks = MSEC_TO_TICKS(net->RTO);
2033 		}
2034 		tmr = &net->rxt_timer;
2035 		break;
2036 	case SCTP_TIMER_TYPE_HEARTBEAT:
2037 		/*
2038 		 * The net is used here so that we can add in the RTO, even
2039 		 * though we use a different timer. We also add the HB delay
2040 		 * PLUS a random jitter.
2041 		 */
2042 		if ((stcb == NULL) || (net == NULL)) {
2043 			return;
2044 		} else {
2045 			uint32_t rndval;
2046 			uint32_t jitter;
2047 
2048 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2049 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2050 				return;
2051 			}
2052 			if (net->RTO == 0) {
2053 				to_ticks = stcb->asoc.initial_rto;
2054 			} else {
2055 				to_ticks = net->RTO;
2056 			}
2057 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2058 			jitter = rndval % to_ticks;
2059 			if (jitter >= (to_ticks >> 1)) {
2060 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2061 			} else {
2062 				to_ticks = to_ticks - jitter;
2063 			}
2064 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2065 			    !(net->dest_state & SCTP_ADDR_PF)) {
2066 				to_ticks += net->heart_beat_delay;
2067 			}
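			/*
			 * Worked example (illustrative values only): with
			 * RTO = 1000 ms the jitter is rndval % 1000, i.e. in
			 * [0, 999].  The adjustment above therefore yields a
			 * value in roughly [RTO/2, RTO + RTO/2] = [500, 1499]
			 * ms, to which heart_beat_delay (e.g. 30000 ms) is
			 * added for confirmed, non-PF destinations before
			 * the conversion to ticks below.
			 */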
2068 			/*
2069 			 * Now we must convert the to_ticks that are now in
2070 			 * ms to ticks.
2071 			 */
2072 			to_ticks = MSEC_TO_TICKS(to_ticks);
2073 			tmr = &net->hb_timer;
2074 		}
2075 		break;
2076 	case SCTP_TIMER_TYPE_COOKIE:
2077 		/*
2078 		 * Here we can use the RTO timer from the network since one
2079 		 * RTT was complete. If a retransmission happened then we
2080 		 * will be using the initial RTO value.
2081 		 */
2082 		if ((stcb == NULL) || (net == NULL)) {
2083 			return;
2084 		}
2085 		if (net->RTO == 0) {
2086 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2087 		} else {
2088 			to_ticks = MSEC_TO_TICKS(net->RTO);
2089 		}
2090 		tmr = &net->rxt_timer;
2091 		break;
2092 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2093 		/*
2094 		 * Nothing needed but the endpoint here; usually about 60
2095 		 * minutes.
2096 		 */
2097 		tmr = &inp->sctp_ep.signature_change;
2098 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2099 		break;
2100 	case SCTP_TIMER_TYPE_ASOCKILL:
2101 		if (stcb == NULL) {
2102 			return;
2103 		}
2104 		tmr = &stcb->asoc.strreset_timer;
2105 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2106 		break;
2107 	case SCTP_TIMER_TYPE_INPKILL:
2108 		/*
2109 		 * The inp is set up to die. We re-use the signature_change
2110 		 * timer since that has stopped and we are in the GONE
2111 		 * state.
2112 		 */
2113 		tmr = &inp->sctp_ep.signature_change;
2114 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2115 		break;
2116 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2117 		/*
2118 		 * Here we use the value found in the EP for PMTU, usually
2119 		 * about 10 minutes.
2120 		 */
2121 		if ((stcb == NULL) || (net == NULL)) {
2122 			return;
2123 		}
2124 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2125 			return;
2126 		}
2127 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2128 		tmr = &net->pmtu_timer;
2129 		break;
2130 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2131 		/* Here we use the RTO of the destination */
2132 		if ((stcb == NULL) || (net == NULL)) {
2133 			return;
2134 		}
2135 		if (net->RTO == 0) {
2136 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2137 		} else {
2138 			to_ticks = MSEC_TO_TICKS(net->RTO);
2139 		}
2140 		tmr = &net->rxt_timer;
2141 		break;
2142 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2143 		/*
2144 		 * Here we use the endpoint's shutdown guard timer, usually
2145 		 * about 3 minutes.
2146 		 */
2147 		if (stcb == NULL) {
2148 			return;
2149 		}
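		/*
		 * If no explicit shutdown guard time was configured, fall
		 * back to 5 times the association's maximum RTO, matching
		 * the commonly recommended T5-shutdown-guard value.
		 */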
2150 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2151 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2152 		} else {
2153 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2154 		}
2155 		tmr = &stcb->asoc.shut_guard_timer;
2156 		break;
2157 	case SCTP_TIMER_TYPE_STRRESET:
2158 		/*
2159 		 * Here the timer comes from the stcb but its value is from
2160 		 * the net's RTO.
2161 		 */
2162 		if ((stcb == NULL) || (net == NULL)) {
2163 			return;
2164 		}
2165 		if (net->RTO == 0) {
2166 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2167 		} else {
2168 			to_ticks = MSEC_TO_TICKS(net->RTO);
2169 		}
2170 		tmr = &stcb->asoc.strreset_timer;
2171 		break;
2172 	case SCTP_TIMER_TYPE_ASCONF:
2173 		/*
2174 		 * Here the timer comes from the stcb but its value is from
2175 		 * the net's RTO.
2176 		 */
2177 		if ((stcb == NULL) || (net == NULL)) {
2178 			return;
2179 		}
2180 		if (net->RTO == 0) {
2181 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2182 		} else {
2183 			to_ticks = MSEC_TO_TICKS(net->RTO);
2184 		}
2185 		tmr = &stcb->asoc.asconf_timer;
2186 		break;
2187 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2188 		if ((stcb == NULL) || (net != NULL)) {
2189 			return;
2190 		}
2191 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2192 		tmr = &stcb->asoc.delete_prim_timer;
2193 		break;
2194 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2195 		if (stcb == NULL) {
2196 			return;
2197 		}
2198 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2199 			/*
2200 			 * Really an error since stcb is NOT set to
2201 			 * autoclose
2202 			 */
2203 			return;
2204 		}
2205 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2206 		tmr = &stcb->asoc.autoclose_timer;
2207 		break;
2208 	default:
2209 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2210 		    __func__, t_type);
2211 		return;
2212 		break;
2213 	}
2214 	if ((to_ticks <= 0) || (tmr == NULL)) {
2215 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2216 		    __func__, t_type, to_ticks, (void *)tmr);
2217 		return;
2218 	}
2219 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2220 		/*
2221 		 * We do NOT allow the timer to be started if it is already
2222 		 * running; if it is, we leave the current one up unchanged.
2223 		 */
2224 		return;
2225 	}
2226 	/* At this point we can proceed */
2227 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2228 		stcb->asoc.num_send_timers_up++;
2229 	}
2230 	tmr->stopped_from = 0;
2231 	tmr->type = t_type;
2232 	tmr->ep = (void *)inp;
2233 	tmr->tcb = (void *)stcb;
2234 	tmr->net = (void *)net;
2235 	tmr->self = (void *)tmr;
2236 	tmr->vnet = (void *)curvnet;
2237 	tmr->ticks = sctp_get_tick_count();
2238 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2239 	return;
2240 }
2241 
2242 void
2243 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2244     struct sctp_nets *net, uint32_t from)
2245 {
2246 	struct sctp_timer *tmr;
2247 
2248 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2249 	    (inp == NULL))
2250 		return;
2251 
2252 	tmr = NULL;
2253 	if (stcb) {
2254 		SCTP_TCB_LOCK_ASSERT(stcb);
2255 	}
2256 	switch (t_type) {
2257 	case SCTP_TIMER_TYPE_ZERO_COPY:
2258 		tmr = &inp->sctp_ep.zero_copy_timer;
2259 		break;
2260 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2261 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2262 		break;
2263 	case SCTP_TIMER_TYPE_ADDR_WQ:
2264 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2265 		break;
2266 	case SCTP_TIMER_TYPE_SEND:
2267 		if ((stcb == NULL) || (net == NULL)) {
2268 			return;
2269 		}
2270 		tmr = &net->rxt_timer;
2271 		break;
2272 	case SCTP_TIMER_TYPE_INIT:
2273 		if ((stcb == NULL) || (net == NULL)) {
2274 			return;
2275 		}
2276 		tmr = &net->rxt_timer;
2277 		break;
2278 	case SCTP_TIMER_TYPE_RECV:
2279 		if (stcb == NULL) {
2280 			return;
2281 		}
2282 		tmr = &stcb->asoc.dack_timer;
2283 		break;
2284 	case SCTP_TIMER_TYPE_SHUTDOWN:
2285 		if ((stcb == NULL) || (net == NULL)) {
2286 			return;
2287 		}
2288 		tmr = &net->rxt_timer;
2289 		break;
2290 	case SCTP_TIMER_TYPE_HEARTBEAT:
2291 		if ((stcb == NULL) || (net == NULL)) {
2292 			return;
2293 		}
2294 		tmr = &net->hb_timer;
2295 		break;
2296 	case SCTP_TIMER_TYPE_COOKIE:
2297 		if ((stcb == NULL) || (net == NULL)) {
2298 			return;
2299 		}
2300 		tmr = &net->rxt_timer;
2301 		break;
2302 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2303 		/* nothing needed but the endpoint here */
2304 		tmr = &inp->sctp_ep.signature_change;
2305 		/*
2306 		 * We re-use the newcookie timer for the INP kill timer. We
2307 		 * must ensure that we do not kill it by accident.
2308 		 */
2309 		break;
2310 	case SCTP_TIMER_TYPE_ASOCKILL:
2311 		/*
2312 		 * Stop the asoc kill timer.
2313 		 */
2314 		if (stcb == NULL) {
2315 			return;
2316 		}
2317 		tmr = &stcb->asoc.strreset_timer;
2318 		break;
2319 
2320 	case SCTP_TIMER_TYPE_INPKILL:
2321 		/*
2322 		 * The inp is set up to die. We re-use the signature_change
2323 		 * timer since that has stopped and we are in the GONE
2324 		 * state.
2325 		 */
2326 		tmr = &inp->sctp_ep.signature_change;
2327 		break;
2328 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2329 		if ((stcb == NULL) || (net == NULL)) {
2330 			return;
2331 		}
2332 		tmr = &net->pmtu_timer;
2333 		break;
2334 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2335 		if ((stcb == NULL) || (net == NULL)) {
2336 			return;
2337 		}
2338 		tmr = &net->rxt_timer;
2339 		break;
2340 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2341 		if (stcb == NULL) {
2342 			return;
2343 		}
2344 		tmr = &stcb->asoc.shut_guard_timer;
2345 		break;
2346 	case SCTP_TIMER_TYPE_STRRESET:
2347 		if (stcb == NULL) {
2348 			return;
2349 		}
2350 		tmr = &stcb->asoc.strreset_timer;
2351 		break;
2352 	case SCTP_TIMER_TYPE_ASCONF:
2353 		if (stcb == NULL) {
2354 			return;
2355 		}
2356 		tmr = &stcb->asoc.asconf_timer;
2357 		break;
2358 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2359 		if (stcb == NULL) {
2360 			return;
2361 		}
2362 		tmr = &stcb->asoc.delete_prim_timer;
2363 		break;
2364 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2365 		if (stcb == NULL) {
2366 			return;
2367 		}
2368 		tmr = &stcb->asoc.autoclose_timer;
2369 		break;
2370 	default:
2371 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2372 		    __func__, t_type);
2373 		break;
2374 	}
2375 	if (tmr == NULL) {
2376 		return;
2377 	}
2378 	if ((tmr->type != t_type) && tmr->type) {
2379 		/*
2380 		 * OK, we have a timer that is under joint use, for example
2381 		 * the cookie timer, which shares net->rxt_timer with the
2382 		 * SEND timer. We are therefore NOT running the timer that
2383 		 * the caller wants stopped, so just return.
2384 		 */
2385 		return;
2386 	}
2387 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2388 		stcb->asoc.num_send_timers_up--;
2389 		if (stcb->asoc.num_send_timers_up < 0) {
2390 			stcb->asoc.num_send_timers_up = 0;
2391 		}
2392 	}
2393 	tmr->self = NULL;
2394 	tmr->stopped_from = from;
2395 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2396 	return;
2397 }
2398 
2399 uint32_t
2400 sctp_calculate_len(struct mbuf *m)
2401 {
2402 	uint32_t tlen = 0;
2403 	struct mbuf *at;
2404 
2405 	at = m;
2406 	while (at) {
2407 		tlen += SCTP_BUF_LEN(at);
2408 		at = SCTP_BUF_NEXT(at);
2409 	}
2410 	return (tlen);
2411 }
2412 
2413 void
2414 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2415     struct sctp_association *asoc, uint32_t mtu)
2416 {
2417 	/*
2418 	 * Reset the P-MTU size on this association; this involves changing
2419 	 * the asoc MTU and marking ANY chunk+overhead larger than mtu to
2420 	 * allow the DF flag to be cleared.
2421 	 */
2422 	struct sctp_tmit_chunk *chk;
2423 	unsigned int eff_mtu, ovh;
2424 
2425 	asoc->smallest_mtu = mtu;
2426 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2427 		ovh = SCTP_MIN_OVERHEAD;
2428 	} else {
2429 		ovh = SCTP_MIN_V4_OVERHEAD;
2430 	}
2431 	eff_mtu = mtu - ovh;
2432 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2433 		if (chk->send_size > eff_mtu) {
2434 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2435 		}
2436 	}
2437 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2438 		if (chk->send_size > eff_mtu) {
2439 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2440 		}
2441 	}
2442 }
2443 
2444 
2445 /*
2446  * Given an association and the starting time of the current RTT period,
2447  * return the RTO in number of msecs. net should point to the current network.
2448  */
2449 
2450 uint32_t
2451 sctp_calculate_rto(struct sctp_tcb *stcb,
2452     struct sctp_association *asoc,
2453     struct sctp_nets *net,
2454     struct timeval *told,
2455     int safe, int rtt_from_sack)
2456 {
2457 	/*-
2458 	 * Given an association and the starting time of the current RTT
2459 	 * period (in *told), return the RTO in number of msecs.
2460 	 */
2461 	int32_t rtt;		/* RTT in ms */
2462 	uint32_t new_rto;
2463 	int first_measure = 0;
2464 	struct timeval now, then, *old;
2465 
2466 	/* Copy it out for sparc64 */
2467 	if (safe == sctp_align_unsafe_makecopy) {
2468 		old = &then;
2469 		memcpy(&then, told, sizeof(struct timeval));
2470 	} else if (safe == sctp_align_safe_nocopy) {
2471 		old = told;
2472 	} else {
2473 		/* error */
2474 		SCTP_PRINTF("Huh, bad rto calc call\n");
2475 		return (0);
2476 	}
2477 	/************************/
2478 	/* 1. calculate new RTT */
2479 	/************************/
2480 	/* get the current time */
2481 	if (stcb->asoc.use_precise_time) {
2482 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2483 	} else {
2484 		(void)SCTP_GETTIME_TIMEVAL(&now);
2485 	}
2486 	timevalsub(&now, old);
2487 	/* store the current RTT in us */
2488 	net->rtt = (uint64_t) 1000000 * (uint64_t) now.tv_sec +
2489 	    (uint64_t) now.tv_usec;
2490 
2491 	/* compute rtt in ms */
2492 	rtt = (int32_t) (net->rtt / 1000);
2493 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2494 		/*
2495 		 * Tell the CC module that a new update has just occurred
2496 		 * from a sack
2497 		 */
2498 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2499 	}
2500 	/*
2501 	 * Do we need to determine the LAN type? We do this only on SACKs,
2502 	 * i.e. RTT being determined from data, not non-data (HB/INIT->INIT-ACK).
2503 	 */
2504 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2505 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2506 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2507 			net->lan_type = SCTP_LAN_INTERNET;
2508 		} else {
2509 			net->lan_type = SCTP_LAN_LOCAL;
2510 		}
2511 	}
2512 	/***************************/
2513 	/* 2. update RTTVAR & SRTT */
2514 	/***************************/
2515 	/*-
2516 	 * Compute the scaled average lastsa and the
2517 	 * scaled variance lastsv as described in van Jacobson
2518 	 * Paper "Congestion Avoidance and Control", Annex A.
2519 	 *
2520 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2521 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2522 	 */
2523 	if (net->RTO_measured) {
2524 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2525 		net->lastsa += rtt;
2526 		if (rtt < 0) {
2527 			rtt = -rtt;
2528 		}
2529 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2530 		net->lastsv += rtt;
2531 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2532 			rto_logging(net, SCTP_LOG_RTTVAR);
2533 		}
2534 	} else {
2535 		/* First RTO measurement */
2536 		net->RTO_measured = 1;
2537 		first_measure = 1;
2538 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2539 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2540 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2541 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2542 		}
2543 	}
2544 	if (net->lastsv == 0) {
2545 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2546 	}
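	/*
	 * Worked example of the scaled update above (illustrative numbers,
	 * assuming SCTP_RTT_SHIFT == 3 and SCTP_RTT_VAR_SHIFT == 2, so that
	 * lastsa holds 8*srtt and lastsv holds 4*rttvar): with lastsa = 800
	 * (srtt 100 ms), lastsv = 200 (rttvar 50 ms) and a new rtt of 180 ms,
	 * the delta is 180 - 100 = 80, lastsa becomes 880 (srtt 110 ms),
	 * |delta| - (lastsv >> 2) = 80 - 50 = 30, lastsv becomes 230, and
	 * new_rto below is (880 >> 3) + 230 = 340 ms, i.e. srtt + 4*rttvar.
	 */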
2547 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2548 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2549 	    (stcb->asoc.sat_network_lockout == 0)) {
2550 		stcb->asoc.sat_network = 1;
2551 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2552 		stcb->asoc.sat_network = 0;
2553 		stcb->asoc.sat_network_lockout = 1;
2554 	}
2555 	/* bound it, per C6/C7 in Section 5.3.1 */
2556 	if (new_rto < stcb->asoc.minrto) {
2557 		new_rto = stcb->asoc.minrto;
2558 	}
2559 	if (new_rto > stcb->asoc.maxrto) {
2560 		new_rto = stcb->asoc.maxrto;
2561 	}
2562 	/* we are now returning the RTO */
2563 	return (new_rto);
2564 }
2565 
2566 /*
2567  * Return a pointer to a contiguous piece of data from the given mbuf chain
2568  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2569  * one mbuf, a copy is made at 'in_ptr'. The caller must ensure that the
2570  * buffer size is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
2571  */
2572 caddr_t
2573 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2574 {
2575 	uint32_t count;
2576 	uint8_t *ptr;
2577 
2578 	ptr = in_ptr;
2579 	if ((off < 0) || (len <= 0))
2580 		return (NULL);
2581 
2582 	/* find the desired start location */
2583 	while ((m != NULL) && (off > 0)) {
2584 		if (off < SCTP_BUF_LEN(m))
2585 			break;
2586 		off -= SCTP_BUF_LEN(m);
2587 		m = SCTP_BUF_NEXT(m);
2588 	}
2589 	if (m == NULL)
2590 		return (NULL);
2591 
2592 	/* is the current mbuf large enough (eg. contiguous)? */
2593 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2594 		return (mtod(m, caddr_t)+off);
2595 	} else {
2596 		/* else, it spans more than one mbuf, so save a temp copy... */
2597 		while ((m != NULL) && (len > 0)) {
2598 			count = min(SCTP_BUF_LEN(m) - off, len);
2599 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2600 			len -= count;
2601 			ptr += count;
2602 			off = 0;
2603 			m = SCTP_BUF_NEXT(m);
2604 		}
2605 		if ((m == NULL) && (len > 0))
2606 			return (NULL);
2607 		else
2608 			return ((caddr_t)in_ptr);
2609 	}
2610 }
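
/*
 * Usage sketch (illustrative only): pull a parameter header out of a
 * possibly fragmented chain, falling back to the copy placed in 'buf'
 * when the header spans mbufs:
 *
 *	struct sctp_paramhdr buf, *ph;
 *
 *	ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
 *	    sizeof(struct sctp_paramhdr), (uint8_t *)&buf);
 *	if (ph == NULL)
 *		return;		(chain is shorter than requested)
 *
 * sctp_get_next_param() below is exactly this pattern with a typed
 * signature.
 */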
2611 
2612 
2613 
2614 struct sctp_paramhdr *
2615 sctp_get_next_param(struct mbuf *m,
2616     int offset,
2617     struct sctp_paramhdr *pull,
2618     int pull_limit)
2619 {
2620 	/* This just provides a typed signature to Peter's Pull routine */
2621 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2622 	    (uint8_t *) pull));
2623 }
2624 
2625 
2626 struct mbuf *
2627 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2628 {
2629 	struct mbuf *m_last;
2630 	caddr_t dp;
2631 
2632 	if (padlen > 3) {
2633 		return (NULL);
2634 	}
2635 	if (padlen <= M_TRAILINGSPACE(m)) {
2636 		/*
2637 		 * The easy way. We hope the majority of the time we hit
2638 		 * here :)
2639 		 */
2640 		m_last = m;
2641 	} else {
2642 		/* Hard way: we must grow the mbuf chain */
2643 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2644 		if (m_last == NULL) {
2645 			return (NULL);
2646 		}
2647 		SCTP_BUF_LEN(m_last) = 0;
2648 		SCTP_BUF_NEXT(m_last) = NULL;
2649 		SCTP_BUF_NEXT(m) = m_last;
2650 	}
2651 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2652 	SCTP_BUF_LEN(m_last) += padlen;
2653 	memset(dp, 0, padlen);
2654 	return (m_last);
2655 }
2656 
2657 struct mbuf *
2658 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2659 {
2660 	/* find the last mbuf in chain and pad it */
2661 	struct mbuf *m_at;
2662 
2663 	if (last_mbuf != NULL) {
2664 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2665 	} else {
2666 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2667 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2668 				return (sctp_add_pad_tombuf(m_at, padval));
2669 			}
2670 		}
2671 	}
2672 	return (NULL);
2673 }
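
/*
 * Both helpers above exist to round an outgoing chunk or parameter up to
 * a 4-byte boundary with zero padding, which is why padlen is limited to
 * at most 3 bytes.  For example (illustrative), a 17-byte chunk needs
 * sctp_pad_lastmbuf(m, 3, NULL) to reach a 20-byte total.
 */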
2674 
2675 static void
2676 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2677     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2678 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2679     SCTP_UNUSED
2680 #endif
2681 )
2682 {
2683 	struct mbuf *m_notify;
2684 	struct sctp_assoc_change *sac;
2685 	struct sctp_queued_to_read *control;
2686 	unsigned int notif_len;
2687 	uint16_t abort_len;
2688 	unsigned int i;
2689 
2690 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2691 	struct socket *so;
2692 
2693 #endif
2694 
2695 	if (stcb == NULL) {
2696 		return;
2697 	}
2698 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2699 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2700 		if (abort != NULL) {
2701 			abort_len = ntohs(abort->ch.chunk_length);
2702 		} else {
2703 			abort_len = 0;
2704 		}
2705 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2706 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2707 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2708 			notif_len += abort_len;
2709 		}
2710 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2711 		if (m_notify == NULL) {
2712 			/* Retry with smaller value. */
2713 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2714 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2715 			if (m_notify == NULL) {
2716 				goto set_error;
2717 			}
2718 		}
2719 		SCTP_BUF_NEXT(m_notify) = NULL;
2720 		sac = mtod(m_notify, struct sctp_assoc_change *);
2721 		memset(sac, 0, notif_len);
2722 		sac->sac_type = SCTP_ASSOC_CHANGE;
2723 		sac->sac_flags = 0;
2724 		sac->sac_length = sizeof(struct sctp_assoc_change);
2725 		sac->sac_state = state;
2726 		sac->sac_error = error;
2727 		/* XXX verify these stream counts */
2728 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2729 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2730 		sac->sac_assoc_id = sctp_get_associd(stcb);
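		/*
		 * For COMM_UP/RESTART, sac_info carries one feature code per
		 * supported extension; for COMM_LOST/CANT_STR_ASSOC it
		 * carries a copy of the received ABORT chunk, if any.
		 */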
2731 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2732 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2733 				i = 0;
2734 				if (stcb->asoc.prsctp_supported == 1) {
2735 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2736 				}
2737 				if (stcb->asoc.auth_supported == 1) {
2738 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2739 				}
2740 				if (stcb->asoc.asconf_supported == 1) {
2741 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2742 				}
2743 				if (stcb->asoc.idata_supported == 1) {
2744 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2745 				}
2746 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2747 				if (stcb->asoc.reconfig_supported == 1) {
2748 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2749 				}
2750 				sac->sac_length += i;
2751 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2752 				memcpy(sac->sac_info, abort, abort_len);
2753 				sac->sac_length += abort_len;
2754 			}
2755 		}
2756 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2757 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2758 		    0, 0, stcb->asoc.context, 0, 0, 0,
2759 		    m_notify);
2760 		if (control != NULL) {
2761 			control->length = SCTP_BUF_LEN(m_notify);
2762 			/* not that we need this */
2763 			control->tail_mbuf = m_notify;
2764 			control->spec_flags = M_NOTIFICATION;
2765 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2766 			    control,
2767 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2768 			    so_locked);
2769 		} else {
2770 			sctp_m_freem(m_notify);
2771 		}
2772 	}
2773 	/*
2774 	 * For 1-to-1 style sockets, we send up an error when an ABORT
2775 	 * comes in.
2776 	 */
2777 set_error:
2778 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2779 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2780 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2781 		SOCK_LOCK(stcb->sctp_socket);
2782 		if (from_peer) {
2783 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2784 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2785 				stcb->sctp_socket->so_error = ECONNREFUSED;
2786 			} else {
2787 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2788 				stcb->sctp_socket->so_error = ECONNRESET;
2789 			}
2790 		} else {
2791 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2792 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2793 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2794 				stcb->sctp_socket->so_error = ETIMEDOUT;
2795 			} else {
2796 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2797 				stcb->sctp_socket->so_error = ECONNABORTED;
2798 			}
2799 		}
2800 	}
2801 	/* Wake ANY sleepers */
2802 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2803 	so = SCTP_INP_SO(stcb->sctp_ep);
2804 	if (!so_locked) {
2805 		atomic_add_int(&stcb->asoc.refcnt, 1);
2806 		SCTP_TCB_UNLOCK(stcb);
2807 		SCTP_SOCKET_LOCK(so, 1);
2808 		SCTP_TCB_LOCK(stcb);
2809 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2810 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2811 			SCTP_SOCKET_UNLOCK(so, 1);
2812 			return;
2813 		}
2814 	}
2815 #endif
2816 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2817 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2818 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2819 		socantrcvmore_locked(stcb->sctp_socket);
2820 	}
2821 	sorwakeup(stcb->sctp_socket);
2822 	sowwakeup(stcb->sctp_socket);
2823 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2824 	if (!so_locked) {
2825 		SCTP_SOCKET_UNLOCK(so, 1);
2826 	}
2827 #endif
2828 }
2829 
2830 static void
2831 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2832     struct sockaddr *sa, uint32_t error, int so_locked
2833 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2834     SCTP_UNUSED
2835 #endif
2836 )
2837 {
2838 	struct mbuf *m_notify;
2839 	struct sctp_paddr_change *spc;
2840 	struct sctp_queued_to_read *control;
2841 
2842 	if ((stcb == NULL) ||
2843 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2844 		/* event not enabled */
2845 		return;
2846 	}
2847 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2848 	if (m_notify == NULL)
2849 		return;
2850 	SCTP_BUF_LEN(m_notify) = 0;
2851 	spc = mtod(m_notify, struct sctp_paddr_change *);
2852 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2853 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2854 	spc->spc_flags = 0;
2855 	spc->spc_length = sizeof(struct sctp_paddr_change);
2856 	switch (sa->sa_family) {
2857 #ifdef INET
2858 	case AF_INET:
2859 #ifdef INET6
2860 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2861 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2862 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2863 		} else {
2864 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2865 		}
2866 #else
2867 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2868 #endif
2869 		break;
2870 #endif
2871 #ifdef INET6
2872 	case AF_INET6:
2873 		{
2874 			struct sockaddr_in6 *sin6;
2875 
2876 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2877 
2878 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2879 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2880 				if (sin6->sin6_scope_id == 0) {
2881 					/* recover scope_id for user */
2882 					(void)sa6_recoverscope(sin6);
2883 				} else {
2884 					/* clear embedded scope_id for user */
2885 					in6_clearscope(&sin6->sin6_addr);
2886 				}
2887 			}
2888 			break;
2889 		}
2890 #endif
2891 	default:
2892 		/* TSNH */
2893 		break;
2894 	}
2895 	spc->spc_state = state;
2896 	spc->spc_error = error;
2897 	spc->spc_assoc_id = sctp_get_associd(stcb);
2898 
2899 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2900 	SCTP_BUF_NEXT(m_notify) = NULL;
2901 
2902 	/* append to socket */
2903 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2904 	    0, 0, stcb->asoc.context, 0, 0, 0,
2905 	    m_notify);
2906 	if (control == NULL) {
2907 		/* no memory */
2908 		sctp_m_freem(m_notify);
2909 		return;
2910 	}
2911 	control->length = SCTP_BUF_LEN(m_notify);
2912 	control->spec_flags = M_NOTIFICATION;
2913 	/* not that we need this */
2914 	control->tail_mbuf = m_notify;
2915 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2916 	    control,
2917 	    &stcb->sctp_socket->so_rcv, 1,
2918 	    SCTP_READ_LOCK_NOT_HELD,
2919 	    so_locked);
2920 }
2921 
2922 
2923 static void
2924 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2925     struct sctp_tmit_chunk *chk, int so_locked
2926 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2927     SCTP_UNUSED
2928 #endif
2929 )
2930 {
2931 	struct mbuf *m_notify;
2932 	struct sctp_send_failed *ssf;
2933 	struct sctp_send_failed_event *ssfe;
2934 	struct sctp_queued_to_read *control;
2935 	int length;
2936 
2937 	if ((stcb == NULL) ||
2938 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2939 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2940 		/* event not enabled */
2941 		return;
2942 	}
2943 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2944 		length = sizeof(struct sctp_send_failed_event);
2945 	} else {
2946 		length = sizeof(struct sctp_send_failed);
2947 	}
2948 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
2949 	if (m_notify == NULL)
2950 		/* no space left */
2951 		return;
2952 	SCTP_BUF_LEN(m_notify) = 0;
2953 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2954 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2955 		memset(ssfe, 0, length);
2956 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2957 		if (sent) {
2958 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2959 		} else {
2960 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2961 		}
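		/*
		 * Report the notification header plus the user payload: the
		 * data chunk header is excluded here and trimmed off the
		 * mbuf chain further below.
		 */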
2962 		length += chk->send_size;
2963 		length -= sizeof(struct sctp_data_chunk);
2964 		ssfe->ssfe_length = length;
2965 		ssfe->ssfe_error = error;
2966 		/* not exactly what the user sent in, but should be close :) */
2967 		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
2968 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2969 		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
2970 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2971 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2972 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2973 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
2974 	} else {
2975 		ssf = mtod(m_notify, struct sctp_send_failed *);
2976 		memset(ssf, 0, length);
2977 		ssf->ssf_type = SCTP_SEND_FAILED;
2978 		if (sent) {
2979 			ssf->ssf_flags = SCTP_DATA_SENT;
2980 		} else {
2981 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2982 		}
2983 		length += chk->send_size;
2984 		length -= sizeof(struct sctp_data_chunk);
2985 		ssf->ssf_length = length;
2986 		ssf->ssf_error = error;
2987 		/* not exactly what the user sent in, but should be close :) */
2988 		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
2989 		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2990 		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2991 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2992 		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2993 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
2994 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2995 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
2996 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2997 	}
2998 	if (chk->data) {
2999 		/*
3000 		 * trim off the SCTP chunk header (it should be there)
3001 		 */
3002 		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3003 			m_adj(chk->data, sizeof(struct sctp_data_chunk));
3004 			sctp_mbuf_crush(chk->data);
3005 			chk->send_size -= sizeof(struct sctp_data_chunk);
3006 		}
3007 	}
3008 	SCTP_BUF_NEXT(m_notify) = chk->data;
3009 	/* Steal off the mbuf */
3010 	chk->data = NULL;
3011 	/*
3012 	 * For this case, we check the actual socket buffer; since the assoc
3013 	 * is going away we don't want to overfill the socket buffer for a
3014 	 * non-reader.
3015 	 */
3016 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3017 		sctp_m_freem(m_notify);
3018 		return;
3019 	}
3020 	/* append to socket */
3021 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3022 	    0, 0, stcb->asoc.context, 0, 0, 0,
3023 	    m_notify);
3024 	if (control == NULL) {
3025 		/* no memory */
3026 		sctp_m_freem(m_notify);
3027 		return;
3028 	}
3029 	control->spec_flags = M_NOTIFICATION;
3030 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3031 	    control,
3032 	    &stcb->sctp_socket->so_rcv, 1,
3033 	    SCTP_READ_LOCK_NOT_HELD,
3034 	    so_locked);
3035 }
3036 
3037 
3038 static void
3039 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3040     struct sctp_stream_queue_pending *sp, int so_locked
3041 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3042     SCTP_UNUSED
3043 #endif
3044 )
3045 {
3046 	struct mbuf *m_notify;
3047 	struct sctp_send_failed *ssf;
3048 	struct sctp_send_failed_event *ssfe;
3049 	struct sctp_queued_to_read *control;
3050 	int length;
3051 
3052 	if ((stcb == NULL) ||
3053 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3054 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3055 		/* event not enabled */
3056 		return;
3057 	}
3058 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3059 		length = sizeof(struct sctp_send_failed_event);
3060 	} else {
3061 		length = sizeof(struct sctp_send_failed);
3062 	}
3063 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
3064 	if (m_notify == NULL) {
3065 		/* no space left */
3066 		return;
3067 	}
3068 	SCTP_BUF_LEN(m_notify) = 0;
3069 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3070 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3071 		memset(ssfe, 0, length);
3072 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3073 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3074 		length += sp->length;
3075 		ssfe->ssfe_length = length;
3076 		ssfe->ssfe_error = error;
3077 		/* not exactly what the user sent in, but should be close :) */
3078 		ssfe->ssfe_info.snd_sid = sp->stream;
3079 		if (sp->some_taken) {
3080 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3081 		} else {
3082 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3083 		}
3084 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3085 		ssfe->ssfe_info.snd_context = sp->context;
3086 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3087 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3088 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
3089 	} else {
3090 		ssf = mtod(m_notify, struct sctp_send_failed *);
3091 		memset(ssf, 0, length);
3092 		ssf->ssf_type = SCTP_SEND_FAILED;
3093 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3094 		length += sp->length;
3095 		ssf->ssf_length = length;
3096 		ssf->ssf_error = error;
3097 		/* not exactly what the user sent in, but should be close :) */
3098 		ssf->ssf_info.sinfo_stream = sp->stream;
3099 		ssf->ssf_info.sinfo_ssn = 0;
3100 		if (sp->some_taken) {
3101 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3102 		} else {
3103 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3104 		}
3105 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3106 		ssf->ssf_info.sinfo_context = sp->context;
3107 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3108 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3109 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3110 	}
3111 	SCTP_BUF_NEXT(m_notify) = sp->data;
3112 
3113 	/* Steal off the mbuf */
3114 	sp->data = NULL;
3115 	/*
3116 	 * For this case, we check the actual socket buffer; since the assoc
3117 	 * is going away we don't want to overfill the socket buffer for a
3118 	 * non-reader.
3119 	 */
3120 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3121 		sctp_m_freem(m_notify);
3122 		return;
3123 	}
3124 	/* append to socket */
3125 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3126 	    0, 0, stcb->asoc.context, 0, 0, 0,
3127 	    m_notify);
3128 	if (control == NULL) {
3129 		/* no memory */
3130 		sctp_m_freem(m_notify);
3131 		return;
3132 	}
3133 	control->spec_flags = M_NOTIFICATION;
3134 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3135 	    control,
3136 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3137 }
3138 
3139 
3140 
3141 static void
3142 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3143 {
3144 	struct mbuf *m_notify;
3145 	struct sctp_adaptation_event *sai;
3146 	struct sctp_queued_to_read *control;
3147 
3148 	if ((stcb == NULL) ||
3149 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3150 		/* event not enabled */
3151 		return;
3152 	}
3153 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_NOWAIT, 1, MT_DATA);
3154 	if (m_notify == NULL)
3155 		/* no space left */
3156 		return;
3157 	SCTP_BUF_LEN(m_notify) = 0;
3158 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3159 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3160 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3161 	sai->sai_flags = 0;
3162 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3163 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3164 	sai->sai_assoc_id = sctp_get_associd(stcb);
3165 
3166 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3167 	SCTP_BUF_NEXT(m_notify) = NULL;
3168 
3169 	/* append to socket */
3170 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3171 	    0, 0, stcb->asoc.context, 0, 0, 0,
3172 	    m_notify);
3173 	if (control == NULL) {
3174 		/* no memory */
3175 		sctp_m_freem(m_notify);
3176 		return;
3177 	}
3178 	control->length = SCTP_BUF_LEN(m_notify);
3179 	control->spec_flags = M_NOTIFICATION;
3180 	/* not that we need this */
3181 	control->tail_mbuf = m_notify;
3182 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3183 	    control,
3184 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3185 }
3186 
3187 /* This always must be called with the read-queue LOCKED in the INP */
3188 static void
3189 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3190     uint32_t val, int so_locked
3191 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3192     SCTP_UNUSED
3193 #endif
3194 )
3195 {
3196 	struct mbuf *m_notify;
3197 	struct sctp_pdapi_event *pdapi;
3198 	struct sctp_queued_to_read *control;
3199 	struct sockbuf *sb;
3200 
3201 	if ((stcb == NULL) ||
3202 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3203 		/* event not enabled */
3204 		return;
3205 	}
3206 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3207 		return;
3208 	}
3209 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3210 	if (m_notify == NULL)
3211 		/* no space left */
3212 		return;
3213 	SCTP_BUF_LEN(m_notify) = 0;
3214 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3215 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3216 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3217 	pdapi->pdapi_flags = 0;
3218 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3219 	pdapi->pdapi_indication = error;
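	/*
	 * 'val' packs the stream id in its upper 16 bits and the
	 * sequence number in its lower 16 bits.
	 */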
3220 	pdapi->pdapi_stream = (val >> 16);
3221 	pdapi->pdapi_seq = (val & 0x0000ffff);
3222 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3223 
3224 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3225 	SCTP_BUF_NEXT(m_notify) = NULL;
3226 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3227 	    0, 0, stcb->asoc.context, 0, 0, 0,
3228 	    m_notify);
3229 	if (control == NULL) {
3230 		/* no memory */
3231 		sctp_m_freem(m_notify);
3232 		return;
3233 	}
3234 	control->spec_flags = M_NOTIFICATION;
3235 	control->length = SCTP_BUF_LEN(m_notify);
3236 	/* not that we need this */
3237 	control->tail_mbuf = m_notify;
3238 	control->held_length = 0;
3239 	control->length = 0;
3240 	sb = &stcb->sctp_socket->so_rcv;
3241 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3242 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3243 	}
3244 	sctp_sballoc(stcb, sb, m_notify);
3245 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3246 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3247 	}
3248 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3249 	control->end_added = 1;
3250 	if (stcb->asoc.control_pdapi)
3251 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3252 	else {
3253 		/* we really should not see this case */
3254 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3255 	}
3256 	if (stcb->sctp_ep && stcb->sctp_socket) {
3257 		/* This should always be the case */
3258 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3259 		struct socket *so;
3260 
3261 		so = SCTP_INP_SO(stcb->sctp_ep);
3262 		if (!so_locked) {
3263 			atomic_add_int(&stcb->asoc.refcnt, 1);
3264 			SCTP_TCB_UNLOCK(stcb);
3265 			SCTP_SOCKET_LOCK(so, 1);
3266 			SCTP_TCB_LOCK(stcb);
3267 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3268 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3269 				SCTP_SOCKET_UNLOCK(so, 1);
3270 				return;
3271 			}
3272 		}
3273 #endif
3274 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3275 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3276 		if (!so_locked) {
3277 			SCTP_SOCKET_UNLOCK(so, 1);
3278 		}
3279 #endif
3280 	}
3281 }
3282 
3283 static void
3284 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3285 {
3286 	struct mbuf *m_notify;
3287 	struct sctp_shutdown_event *sse;
3288 	struct sctp_queued_to_read *control;
3289 
3290 	/*
3291 	 * For TCP model AND UDP connected sockets we will send an error up
3292 	 * when a SHUTDOWN completes.
3293 	 */
3294 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3295 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3296 		/* mark socket closed for read/write and wakeup! */
3297 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3298 		struct socket *so;
3299 
3300 		so = SCTP_INP_SO(stcb->sctp_ep);
3301 		atomic_add_int(&stcb->asoc.refcnt, 1);
3302 		SCTP_TCB_UNLOCK(stcb);
3303 		SCTP_SOCKET_LOCK(so, 1);
3304 		SCTP_TCB_LOCK(stcb);
3305 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3306 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3307 			SCTP_SOCKET_UNLOCK(so, 1);
3308 			return;
3309 		}
3310 #endif
3311 		socantsendmore(stcb->sctp_socket);
3312 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3313 		SCTP_SOCKET_UNLOCK(so, 1);
3314 #endif
3315 	}
3316 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3317 		/* event not enabled */
3318 		return;
3319 	}
3320 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3321 	if (m_notify == NULL)
3322 		/* no space left */
3323 		return;
3324 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3325 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3326 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3327 	sse->sse_flags = 0;
3328 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3329 	sse->sse_assoc_id = sctp_get_associd(stcb);
3330 
3331 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3332 	SCTP_BUF_NEXT(m_notify) = NULL;
3333 
3334 	/* append to socket */
3335 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3336 	    0, 0, stcb->asoc.context, 0, 0, 0,
3337 	    m_notify);
3338 	if (control == NULL) {
3339 		/* no memory */
3340 		sctp_m_freem(m_notify);
3341 		return;
3342 	}
3343 	control->spec_flags = M_NOTIFICATION;
3344 	control->length = SCTP_BUF_LEN(m_notify);
3345 	/* not that we need this */
3346 	control->tail_mbuf = m_notify;
3347 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3348 	    control,
3349 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3350 }
3351 
3352 static void
3353 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3354     int so_locked
3355 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3356     SCTP_UNUSED
3357 #endif
3358 )
3359 {
3360 	struct mbuf *m_notify;
3361 	struct sctp_sender_dry_event *event;
3362 	struct sctp_queued_to_read *control;
3363 
3364 	if ((stcb == NULL) ||
3365 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3366 		/* event not enabled */
3367 		return;
3368 	}
3369 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3370 	if (m_notify == NULL) {
3371 		/* no space left */
3372 		return;
3373 	}
3374 	SCTP_BUF_LEN(m_notify) = 0;
3375 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3376 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3377 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3378 	event->sender_dry_flags = 0;
3379 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3380 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3381 
3382 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3383 	SCTP_BUF_NEXT(m_notify) = NULL;
3384 
3385 	/* append to socket */
3386 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3387 	    0, 0, stcb->asoc.context, 0, 0, 0,
3388 	    m_notify);
3389 	if (control == NULL) {
3390 		/* no memory */
3391 		sctp_m_freem(m_notify);
3392 		return;
3393 	}
3394 	control->length = SCTP_BUF_LEN(m_notify);
3395 	control->spec_flags = M_NOTIFICATION;
3396 	/* not that we need this */
3397 	control->tail_mbuf = m_notify;
3398 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3399 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3400 }
3401 
3402 
3403 void
3404 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3405 {
3406 	struct mbuf *m_notify;
3407 	struct sctp_queued_to_read *control;
3408 	struct sctp_stream_change_event *stradd;
3409 
3410 	if ((stcb == NULL) ||
3411 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3412 		/* event not enabled */
3413 		return;
3414 	}
3415 	if ((stcb->asoc.peer_req_out) && flag) {
3416 		/* Peer made the request, don't tell the local user */
3417 		stcb->asoc.peer_req_out = 0;
3418 		return;
3419 	}
3420 	stcb->asoc.peer_req_out = 0;
3421 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3422 	if (m_notify == NULL)
3423 		/* no space left */
3424 		return;
3425 	SCTP_BUF_LEN(m_notify) = 0;
3426 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3427 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3428 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3429 	stradd->strchange_flags = flag;
3430 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3431 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3432 	stradd->strchange_instrms = numberin;
3433 	stradd->strchange_outstrms = numberout;
3434 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3435 	SCTP_BUF_NEXT(m_notify) = NULL;
3436 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3437 		/* no space */
3438 		sctp_m_freem(m_notify);
3439 		return;
3440 	}
3441 	/* append to socket */
3442 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3443 	    0, 0, stcb->asoc.context, 0, 0, 0,
3444 	    m_notify);
3445 	if (control == NULL) {
3446 		/* no memory */
3447 		sctp_m_freem(m_notify);
3448 		return;
3449 	}
3450 	control->spec_flags = M_NOTIFICATION;
3451 	control->length = SCTP_BUF_LEN(m_notify);
3452 	/* not that we need this */
3453 	control->tail_mbuf = m_notify;
3454 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3455 	    control,
3456 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3457 }
3458 
3459 void
3460 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3461 {
3462 	struct mbuf *m_notify;
3463 	struct sctp_queued_to_read *control;
3464 	struct sctp_assoc_reset_event *strasoc;
3465 
3466 	if ((stcb == NULL) ||
3467 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3468 		/* event not enabled */
3469 		return;
3470 	}
3471 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3472 	if (m_notify == NULL)
3473 		/* no space left */
3474 		return;
3475 	SCTP_BUF_LEN(m_notify) = 0;
3476 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3477 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3478 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3479 	strasoc->assocreset_flags = flag;
3480 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3481 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3482 	strasoc->assocreset_local_tsn = sending_tsn;
3483 	strasoc->assocreset_remote_tsn = recv_tsn;
3484 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3485 	SCTP_BUF_NEXT(m_notify) = NULL;
3486 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3487 		/* no space */
3488 		sctp_m_freem(m_notify);
3489 		return;
3490 	}
3491 	/* append to socket */
3492 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3493 	    0, 0, stcb->asoc.context, 0, 0, 0,
3494 	    m_notify);
3495 	if (control == NULL) {
3496 		/* no memory */
3497 		sctp_m_freem(m_notify);
3498 		return;
3499 	}
3500 	control->spec_flags = M_NOTIFICATION;
3501 	control->length = SCTP_BUF_LEN(m_notify);
3502 	/* not that we need this, but set tail_mbuf anyway */
3503 	control->tail_mbuf = m_notify;
3504 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3505 	    control,
3506 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3507 }
3508 
3509 
3510 
3511 static void
3512 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3513     int number_entries, uint16_t * list, int flag)
3514 {
3515 	struct mbuf *m_notify;
3516 	struct sctp_queued_to_read *control;
3517 	struct sctp_stream_reset_event *strreset;
3518 	int len;
3519 
3520 	if ((stcb == NULL) ||
3521 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3522 		/* event not enabled */
3523 		return;
3524 	}
3525 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3526 	if (m_notify == NULL)
3527 		/* no space left */
3528 		return;
3529 	SCTP_BUF_LEN(m_notify) = 0;
3530 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3531 	if (len > M_TRAILINGSPACE(m_notify)) {
3532 		/* never enough room */
3533 		sctp_m_freem(m_notify);
3534 		return;
3535 	}
3536 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3537 	memset(strreset, 0, len);
3538 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3539 	strreset->strreset_flags = flag;
3540 	strreset->strreset_length = len;
3541 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3542 	if (number_entries) {
3543 		int i;
3544 
3545 		for (i = 0; i < number_entries; i++) {
3546 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3547 		}
3548 	}
3549 	SCTP_BUF_LEN(m_notify) = len;
3550 	SCTP_BUF_NEXT(m_notify) = NULL;
3551 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3552 		/* no space */
3553 		sctp_m_freem(m_notify);
3554 		return;
3555 	}
3556 	/* append to socket */
3557 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3558 	    0, 0, stcb->asoc.context, 0, 0, 0,
3559 	    m_notify);
3560 	if (control == NULL) {
3561 		/* no memory */
3562 		sctp_m_freem(m_notify);
3563 		return;
3564 	}
3565 	control->spec_flags = M_NOTIFICATION;
3566 	control->length = SCTP_BUF_LEN(m_notify);
3567 	/* not that we need this, but set tail_mbuf anyway */
3568 	control->tail_mbuf = m_notify;
3569 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3570 	    control,
3571 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3572 }
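
/*
 * A minimal userland sketch (assumed receiver code, not part of this file;
 * "buf" is a hypothetical buffer already filled by a read on the socket):
 * the notification built above is delivered with MSG_NOTIFICATION set, and
 * the affected streams can be pulled out of strreset_stream_list[], whose
 * count is implied by strreset_length exactly as computed in this function:
 *
 *	struct sctp_stream_reset_event *ev;
 *	unsigned int i, n;
 *
 *	ev = (struct sctp_stream_reset_event *)buf;
 *	n = (ev->strreset_length - sizeof(*ev)) / sizeof(uint16_t);
 *	for (i = 0; i < n; i++)
 *		printf("stream %u was reset\n", ev->strreset_stream_list[i]);
 */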
3573 
3574 
3575 static void
3576 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3577 {
3578 	struct mbuf *m_notify;
3579 	struct sctp_remote_error *sre;
3580 	struct sctp_queued_to_read *control;
3581 	unsigned int notif_len;
3582 	uint16_t chunk_len;
3583 
3584 	if ((stcb == NULL) ||
3585 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3586 		return;
3587 	}
3588 	if (chunk != NULL) {
3589 		chunk_len = ntohs(chunk->ch.chunk_length);
3590 	} else {
3591 		chunk_len = 0;
3592 	}
3593 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3594 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3595 	if (m_notify == NULL) {
3596 		/* Retry with smaller value. */
3597 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3598 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3599 		if (m_notify == NULL) {
3600 			return;
3601 		}
3602 	}
3603 	SCTP_BUF_NEXT(m_notify) = NULL;
3604 	sre = mtod(m_notify, struct sctp_remote_error *);
3605 	memset(sre, 0, notif_len);
3606 	sre->sre_type = SCTP_REMOTE_ERROR;
3607 	sre->sre_flags = 0;
3608 	sre->sre_length = sizeof(struct sctp_remote_error);
3609 	sre->sre_error = error;
3610 	sre->sre_assoc_id = sctp_get_associd(stcb);
3611 	if (notif_len > sizeof(struct sctp_remote_error)) {
3612 		memcpy(sre->sre_data, chunk, chunk_len);
3613 		sre->sre_length += chunk_len;
3614 	}
3615 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3616 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3617 	    0, 0, stcb->asoc.context, 0, 0, 0,
3618 	    m_notify);
3619 	if (control != NULL) {
3620 		control->length = SCTP_BUF_LEN(m_notify);
3621 		/* not that we need this, but set tail_mbuf anyway */
3622 		control->tail_mbuf = m_notify;
3623 		control->spec_flags = M_NOTIFICATION;
3624 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3625 		    control,
3626 		    &stcb->sctp_socket->so_rcv, 1,
3627 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3628 	} else {
3629 		sctp_m_freem(m_notify);
3630 	}
3631 }
3632 
3633 
3634 void
3635 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3636     uint32_t error, void *data, int so_locked
3637 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3638     SCTP_UNUSED
3639 #endif
3640 )
3641 {
3642 	if ((stcb == NULL) ||
3643 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3644 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3645 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3646 		/* If the socket is gone we are out of here */
3647 		return;
3648 	}
3649 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3650 		return;
3651 	}
3652 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3653 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3654 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3655 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3656 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3657 			/* Don't report these in front states */
3658 			return;
3659 		}
3660 	}
3661 	switch (notification) {
3662 	case SCTP_NOTIFY_ASSOC_UP:
3663 		if (stcb->asoc.assoc_up_sent == 0) {
3664 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3665 			stcb->asoc.assoc_up_sent = 1;
3666 		}
3667 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3668 			sctp_notify_adaptation_layer(stcb);
3669 		}
3670 		if (stcb->asoc.auth_supported == 0) {
3671 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3672 			    NULL, so_locked);
3673 		}
3674 		break;
3675 	case SCTP_NOTIFY_ASSOC_DOWN:
3676 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3677 		break;
3678 	case SCTP_NOTIFY_INTERFACE_DOWN:
3679 		{
3680 			struct sctp_nets *net;
3681 
3682 			net = (struct sctp_nets *)data;
3683 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3684 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3685 			break;
3686 		}
3687 	case SCTP_NOTIFY_INTERFACE_UP:
3688 		{
3689 			struct sctp_nets *net;
3690 
3691 			net = (struct sctp_nets *)data;
3692 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3693 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3694 			break;
3695 		}
3696 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3697 		{
3698 			struct sctp_nets *net;
3699 
3700 			net = (struct sctp_nets *)data;
3701 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3702 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3703 			break;
3704 		}
3705 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3706 		sctp_notify_send_failed2(stcb, error,
3707 		    (struct sctp_stream_queue_pending *)data, so_locked);
3708 		break;
3709 	case SCTP_NOTIFY_SENT_DG_FAIL:
3710 		sctp_notify_send_failed(stcb, 1, error,
3711 		    (struct sctp_tmit_chunk *)data, so_locked);
3712 		break;
3713 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3714 		sctp_notify_send_failed(stcb, 0, error,
3715 		    (struct sctp_tmit_chunk *)data, so_locked);
3716 		break;
3717 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3718 		{
3719 			uint32_t val;
3720 
3721 			val = *((uint32_t *) data);
3722 
3723 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3724 			break;
3725 		}
3726 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3727 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3728 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3729 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3730 		} else {
3731 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3732 		}
3733 		break;
3734 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3735 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3736 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3737 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3738 		} else {
3739 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3740 		}
3741 		break;
3742 	case SCTP_NOTIFY_ASSOC_RESTART:
3743 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3744 		if (stcb->asoc.auth_supported == 0) {
3745 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3746 			    NULL, so_locked);
3747 		}
3748 		break;
3749 	case SCTP_NOTIFY_STR_RESET_SEND:
3750 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3751 		break;
3752 	case SCTP_NOTIFY_STR_RESET_RECV:
3753 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3754 		break;
3755 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3756 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3757 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3758 		break;
3759 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3760 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3761 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3762 		break;
3763 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3764 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3765 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3766 		break;
3767 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3768 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3769 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3770 		break;
3771 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3772 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3773 		    error, so_locked);
3774 		break;
3775 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3776 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3777 		    error, so_locked);
3778 		break;
3779 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3780 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3781 		    error, so_locked);
3782 		break;
3783 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3784 		sctp_notify_shutdown_event(stcb);
3785 		break;
3786 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3787 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3788 		    (uint16_t) (uintptr_t) data,
3789 		    so_locked);
3790 		break;
3791 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3792 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3793 		    (uint16_t) (uintptr_t) data,
3794 		    so_locked);
3795 		break;
3796 	case SCTP_NOTIFY_NO_PEER_AUTH:
3797 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3798 		    (uint16_t) (uintptr_t) data,
3799 		    so_locked);
3800 		break;
3801 	case SCTP_NOTIFY_SENDER_DRY:
3802 		sctp_notify_sender_dry_event(stcb, so_locked);
3803 		break;
3804 	case SCTP_NOTIFY_REMOTE_ERROR:
3805 		sctp_notify_remote_error(stcb, error, data);
3806 		break;
3807 	default:
3808 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3809 		    __func__, notification, notification);
3810 		break;
3811 	}			/* end switch */
3812 }
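
/*
 * A sketch of how the notifications dispatched above look from userland
 * (assumptions: a socket "fd" with the relevant events enabled and a
 * hypothetical helper handle_notification(); not code from this file).
 * recvmsg() marks a notification with MSG_NOTIFICATION in msg_flags and the
 * payload begins with the common header of union sctp_notification:
 *
 *	char buf[2048];
 *	struct iovec iov = { buf, sizeof(buf) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
 *	ssize_t n = recvmsg(fd, &msg, 0);
 *
 *	if (n > 0 && (msg.msg_flags & MSG_NOTIFICATION)) {
 *		union sctp_notification *sn = (union sctp_notification *)buf;
 *
 *		switch (sn->sn_header.sn_type) {
 *		case SCTP_SENDER_DRY_EVENT:
 *		case SCTP_STREAM_RESET_EVENT:
 *		case SCTP_REMOTE_ERROR:
 *			handle_notification(sn);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */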
3813 
3814 void
3815 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3816 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3817     SCTP_UNUSED
3818 #endif
3819 )
3820 {
3821 	struct sctp_association *asoc;
3822 	struct sctp_stream_out *outs;
3823 	struct sctp_tmit_chunk *chk, *nchk;
3824 	struct sctp_stream_queue_pending *sp, *nsp;
3825 	int i;
3826 
3827 	if (stcb == NULL) {
3828 		return;
3829 	}
3830 	asoc = &stcb->asoc;
3831 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3832 		/* already being freed */
3833 		return;
3834 	}
3835 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3836 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3837 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3838 		return;
3839 	}
3840 	/* now go through all the gunk, freeing chunks */
3841 	if (holds_lock == 0) {
3842 		SCTP_TCB_SEND_LOCK(stcb);
3843 	}
3844 	/* sent queue SHOULD be empty */
3845 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3846 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3847 		asoc->sent_queue_cnt--;
3848 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3849 			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3850 				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3851 #ifdef INVARIANTS
3852 			} else {
3853 				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3854 #endif
3855 			}
3856 		}
3857 		if (chk->data != NULL) {
3858 			sctp_free_bufspace(stcb, asoc, chk, 1);
3859 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3860 			    error, chk, so_locked);
3861 			if (chk->data) {
3862 				sctp_m_freem(chk->data);
3863 				chk->data = NULL;
3864 			}
3865 		}
3866 		sctp_free_a_chunk(stcb, chk, so_locked);
3867 		/* sa_ignore FREED_MEMORY */
3868 	}
3869 	/* pending send queue SHOULD be empty */
3870 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3871 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3872 		asoc->send_queue_cnt--;
3873 		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3874 			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3875 #ifdef INVARIANTS
3876 		} else {
3877 			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3878 #endif
3879 		}
3880 		if (chk->data != NULL) {
3881 			sctp_free_bufspace(stcb, asoc, chk, 1);
3882 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3883 			    error, chk, so_locked);
3884 			if (chk->data) {
3885 				sctp_m_freem(chk->data);
3886 				chk->data = NULL;
3887 			}
3888 		}
3889 		sctp_free_a_chunk(stcb, chk, so_locked);
3890 		/* sa_ignore FREED_MEMORY */
3891 	}
3892 	for (i = 0; i < asoc->streamoutcnt; i++) {
3893 		/* For each stream */
3894 		outs = &asoc->strmout[i];
3895 		/* clean up any sends there */
3896 		asoc->locked_on_sending = NULL;
3897 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3898 			asoc->stream_queue_cnt--;
3899 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3900 			sctp_free_spbufspace(stcb, asoc, sp);
3901 			if (sp->data) {
3902 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3903 				    error, (void *)sp, so_locked);
3904 				if (sp->data) {
3905 					sctp_m_freem(sp->data);
3906 					sp->data = NULL;
3907 					sp->tail_mbuf = NULL;
3908 					sp->length = 0;
3909 				}
3910 			}
3911 			if (sp->net) {
3912 				sctp_free_remote_addr(sp->net);
3913 				sp->net = NULL;
3914 			}
3915 			/* Free the chunk */
3916 			sctp_free_a_strmoq(stcb, sp, so_locked);
3917 			/* sa_ignore FREED_MEMORY */
3918 		}
3919 	}
3920 
3921 	if (holds_lock == 0) {
3922 		SCTP_TCB_SEND_UNLOCK(stcb);
3923 	}
3924 }
3925 
3926 void
3927 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3928     struct sctp_abort_chunk *abort, int so_locked
3929 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3930     SCTP_UNUSED
3931 #endif
3932 )
3933 {
3934 	if (stcb == NULL) {
3935 		return;
3936 	}
3937 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3938 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3939 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3940 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3941 	}
3942 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3943 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3944 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3945 		return;
3946 	}
3947 	/* Tell them we lost the asoc */
3948 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3949 	if (from_peer) {
3950 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3951 	} else {
3952 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3953 	}
3954 }
3955 
3956 void
3957 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3958     struct mbuf *m, int iphlen,
3959     struct sockaddr *src, struct sockaddr *dst,
3960     struct sctphdr *sh, struct mbuf *op_err,
3961     uint8_t mflowtype, uint32_t mflowid,
3962     uint32_t vrf_id, uint16_t port)
3963 {
3964 	uint32_t vtag;
3965 
3966 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3967 	struct socket *so;
3968 
3969 #endif
3970 
3971 	vtag = 0;
3972 	if (stcb != NULL) {
3973 		/* We have a TCB to abort, send notification too */
3974 		vtag = stcb->asoc.peer_vtag;
3975 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
3976 		/* get the assoc vrf id and table id */
3977 		vrf_id = stcb->asoc.vrf_id;
3978 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3979 	}
3980 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
3981 	    mflowtype, mflowid, inp->fibnum,
3982 	    vrf_id, port);
3983 	if (stcb != NULL) {
3984 		/* Ok, now lets free it */
3985 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3986 		so = SCTP_INP_SO(inp);
3987 		atomic_add_int(&stcb->asoc.refcnt, 1);
3988 		SCTP_TCB_UNLOCK(stcb);
3989 		SCTP_SOCKET_LOCK(so, 1);
3990 		SCTP_TCB_LOCK(stcb);
3991 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3992 #endif
3993 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3994 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3995 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3996 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3997 		}
3998 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
3999 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
4000 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4001 		SCTP_SOCKET_UNLOCK(so, 1);
4002 #endif
4003 	}
4004 }
4005 
4006 #ifdef SCTP_ASOCLOG_OF_TSNS
4007 void
4008 sctp_print_out_track_log(struct sctp_tcb *stcb)
4009 {
4010 #ifdef NOSIY_PRINTS
4011 	int i;
4012 
4013 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4014 	SCTP_PRINTF("IN bound TSN log-aaa\n");
4015 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4016 		SCTP_PRINTF("None rcvd\n");
4017 		goto none_in;
4018 	}
4019 	if (stcb->asoc.tsn_in_wrapped) {
4020 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4021 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4022 			    stcb->asoc.in_tsnlog[i].tsn,
4023 			    stcb->asoc.in_tsnlog[i].strm,
4024 			    stcb->asoc.in_tsnlog[i].seq,
4025 			    stcb->asoc.in_tsnlog[i].flgs,
4026 			    stcb->asoc.in_tsnlog[i].sz);
4027 		}
4028 	}
4029 	if (stcb->asoc.tsn_in_at) {
4030 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4031 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4032 			    stcb->asoc.in_tsnlog[i].tsn,
4033 			    stcb->asoc.in_tsnlog[i].strm,
4034 			    stcb->asoc.in_tsnlog[i].seq,
4035 			    stcb->asoc.in_tsnlog[i].flgs,
4036 			    stcb->asoc.in_tsnlog[i].sz);
4037 		}
4038 	}
4039 none_in:
4040 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4041 	if ((stcb->asoc.tsn_out_at == 0) &&
4042 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4043 		SCTP_PRINTF("None sent\n");
4044 	}
4045 	if (stcb->asoc.tsn_out_wrapped) {
4046 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4047 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4048 			    stcb->asoc.out_tsnlog[i].tsn,
4049 			    stcb->asoc.out_tsnlog[i].strm,
4050 			    stcb->asoc.out_tsnlog[i].seq,
4051 			    stcb->asoc.out_tsnlog[i].flgs,
4052 			    stcb->asoc.out_tsnlog[i].sz);
4053 		}
4054 	}
4055 	if (stcb->asoc.tsn_out_at) {
4056 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4057 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4058 			    stcb->asoc.out_tsnlog[i].tsn,
4059 			    stcb->asoc.out_tsnlog[i].strm,
4060 			    stcb->asoc.out_tsnlog[i].seq,
4061 			    stcb->asoc.out_tsnlog[i].flgs,
4062 			    stcb->asoc.out_tsnlog[i].sz);
4063 		}
4064 	}
4065 #endif
4066 }
4067 
4068 #endif
4069 
4070 void
4071 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4072     struct mbuf *op_err,
4073     int so_locked
4074 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4075     SCTP_UNUSED
4076 #endif
4077 )
4078 {
4079 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4080 	struct socket *so;
4081 
4082 #endif
4083 
4084 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4085 	so = SCTP_INP_SO(inp);
4086 #endif
4087 	if (stcb == NULL) {
4088 		/* Got to have a TCB */
4089 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4090 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4091 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4092 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4093 			}
4094 		}
4095 		return;
4096 	} else {
4097 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4098 	}
4099 	/* notify the ulp */
4100 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4101 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4102 	}
4103 	/* notify the peer */
4104 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4105 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4106 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4107 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4108 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4109 	}
4110 	/* now free the asoc */
4111 #ifdef SCTP_ASOCLOG_OF_TSNS
4112 	sctp_print_out_track_log(stcb);
4113 #endif
4114 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4115 	if (!so_locked) {
4116 		atomic_add_int(&stcb->asoc.refcnt, 1);
4117 		SCTP_TCB_UNLOCK(stcb);
4118 		SCTP_SOCKET_LOCK(so, 1);
4119 		SCTP_TCB_LOCK(stcb);
4120 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4121 	}
4122 #endif
4123 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4124 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4125 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4126 	if (!so_locked) {
4127 		SCTP_SOCKET_UNLOCK(so, 1);
4128 	}
4129 #endif
4130 }
4131 
4132 void
4133 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4134     struct sockaddr *src, struct sockaddr *dst,
4135     struct sctphdr *sh, struct sctp_inpcb *inp,
4136     struct mbuf *cause,
4137     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4138     uint32_t vrf_id, uint16_t port)
4139 {
4140 	struct sctp_chunkhdr *ch, chunk_buf;
4141 	unsigned int chk_length;
4142 	int contains_init_chunk;
4143 
4144 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4145 	/* Generate a TO address for future reference */
4146 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4147 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4148 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4149 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4150 		}
4151 	}
4152 	contains_init_chunk = 0;
4153 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4154 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4155 	while (ch != NULL) {
4156 		chk_length = ntohs(ch->chunk_length);
4157 		if (chk_length < sizeof(*ch)) {
4158 			/* break to abort land */
4159 			break;
4160 		}
4161 		switch (ch->chunk_type) {
4162 		case SCTP_INIT:
4163 			contains_init_chunk = 1;
4164 			break;
4165 		case SCTP_PACKET_DROPPED:
4166 			/* we don't respond to pkt-dropped */
4167 			return;
4168 		case SCTP_ABORT_ASSOCIATION:
4169 			/* we don't respond with an ABORT to an ABORT */
4170 			return;
4171 		case SCTP_SHUTDOWN_COMPLETE:
4172 			/*
4173 			 * we ignore it since we are not waiting for it and
4174 			 * peer is gone
4175 			 */
4176 			return;
4177 		case SCTP_SHUTDOWN_ACK:
4178 			sctp_send_shutdown_complete2(src, dst, sh,
4179 			    mflowtype, mflowid, fibnum,
4180 			    vrf_id, port);
4181 			return;
4182 		default:
4183 			break;
4184 		}
4185 		offset += SCTP_SIZE32(chk_length);
4186 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4187 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4188 	}
4189 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4190 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4191 	    (contains_init_chunk == 0))) {
4192 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4193 		    mflowtype, mflowid, fibnum,
4194 		    vrf_id, port);
4195 	}
4196 }
4197 
4198 /*
4199  * Check the inbound datagram to make sure there is not an ABORT chunk
4200  * inside it; if there is, return 1, else return 0.
4201  */
4202 int
4203 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4204 {
4205 	struct sctp_chunkhdr *ch;
4206 	struct sctp_init_chunk *init_chk, chunk_buf;
4207 	int offset;
4208 	unsigned int chk_length;
4209 
4210 	offset = iphlen + sizeof(struct sctphdr);
4211 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4212 	    (uint8_t *) & chunk_buf);
4213 	while (ch != NULL) {
4214 		chk_length = ntohs(ch->chunk_length);
4215 		if (chk_length < sizeof(*ch)) {
4216 			/* packet is probably corrupt */
4217 			break;
4218 		}
4219 		/* we seem to be ok, is it an abort? */
4220 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4221 			/* yep, tell them */
4222 			return (1);
4223 		}
4224 		if (ch->chunk_type == SCTP_INITIATION) {
4225 			/* need to update the Vtag */
4226 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4227 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4228 			if (init_chk != NULL) {
4229 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4230 			}
4231 		}
4232 		/* Nope, move to the next chunk */
4233 		offset += SCTP_SIZE32(chk_length);
4234 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4235 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4236 	}
4237 	return (0);
4238 }
4239 
4240 /*
4241  * currently (2/02), ifa_addr embeds scope_id's and doesn't have sin6_scope_id
4242  * set (i.e. it's 0), so create this function to compare link-local scopes
4243  */
4244 #ifdef INET6
4245 uint32_t
4246 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4247 {
4248 	struct sockaddr_in6 a, b;
4249 
4250 	/* save copies */
4251 	a = *addr1;
4252 	b = *addr2;
4253 
4254 	if (a.sin6_scope_id == 0)
4255 		if (sa6_recoverscope(&a)) {
4256 			/* can't get scope, so can't match */
4257 			return (0);
4258 		}
4259 	if (b.sin6_scope_id == 0)
4260 		if (sa6_recoverscope(&b)) {
4261 			/* can't get scope, so can't match */
4262 			return (0);
4263 		}
4264 	if (a.sin6_scope_id != b.sin6_scope_id)
4265 		return (0);
4266 
4267 	return (1);
4268 }
4269 
4270 /*
4271  * returns a sockaddr_in6 with embedded scope recovered and removed
4272  */
4273 struct sockaddr_in6 *
4274 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4275 {
4276 	/* check and strip embedded scope junk */
4277 	if (addr->sin6_family == AF_INET6) {
4278 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4279 			if (addr->sin6_scope_id == 0) {
4280 				*store = *addr;
4281 				if (!sa6_recoverscope(store)) {
4282 					/* use the recovered scope */
4283 					addr = store;
4284 				}
4285 			} else {
4286 				/* else, return the original "to" addr */
4287 				in6_clearscope(&addr->sin6_addr);
4288 			}
4289 		}
4290 	}
4291 	return (addr);
4292 }
4293 
4294 #endif
4295 
4296 /*
4297  * Are the two addresses the same?  Currently a "scopeless" check.  Returns
4298  * 1 if same, 0 if not.
4299  */
4300 int
4301 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4302 {
4303 
4304 	/* must be valid */
4305 	if (sa1 == NULL || sa2 == NULL)
4306 		return (0);
4307 
4308 	/* must be the same family */
4309 	if (sa1->sa_family != sa2->sa_family)
4310 		return (0);
4311 
4312 	switch (sa1->sa_family) {
4313 #ifdef INET6
4314 	case AF_INET6:
4315 		{
4316 			/* IPv6 addresses */
4317 			struct sockaddr_in6 *sin6_1, *sin6_2;
4318 
4319 			sin6_1 = (struct sockaddr_in6 *)sa1;
4320 			sin6_2 = (struct sockaddr_in6 *)sa2;
4321 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4322 			    sin6_2));
4323 		}
4324 #endif
4325 #ifdef INET
4326 	case AF_INET:
4327 		{
4328 			/* IPv4 addresses */
4329 			struct sockaddr_in *sin_1, *sin_2;
4330 
4331 			sin_1 = (struct sockaddr_in *)sa1;
4332 			sin_2 = (struct sockaddr_in *)sa2;
4333 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4334 		}
4335 #endif
4336 	default:
4337 		/* we don't do these... */
4338 		return (0);
4339 	}
4340 }
4341 
4342 void
4343 sctp_print_address(struct sockaddr *sa)
4344 {
4345 #ifdef INET6
4346 	char ip6buf[INET6_ADDRSTRLEN];
4347 
4348 #endif
4349 
4350 	switch (sa->sa_family) {
4351 #ifdef INET6
4352 	case AF_INET6:
4353 		{
4354 			struct sockaddr_in6 *sin6;
4355 
4356 			sin6 = (struct sockaddr_in6 *)sa;
4357 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4358 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4359 			    ntohs(sin6->sin6_port),
4360 			    sin6->sin6_scope_id);
4361 			break;
4362 		}
4363 #endif
4364 #ifdef INET
4365 	case AF_INET:
4366 		{
4367 			struct sockaddr_in *sin;
4368 			unsigned char *p;
4369 
4370 			sin = (struct sockaddr_in *)sa;
4371 			p = (unsigned char *)&sin->sin_addr;
4372 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4373 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4374 			break;
4375 		}
4376 #endif
4377 	default:
4378 		SCTP_PRINTF("?\n");
4379 		break;
4380 	}
4381 }
4382 
4383 void
4384 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4385     struct sctp_inpcb *new_inp,
4386     struct sctp_tcb *stcb,
4387     int waitflags)
4388 {
4389 	/*
4390 	 * go through our old INP and pull off any control structures that
4391 	 * belong to stcb and move them to the new inp.
4392 	 */
4393 	struct socket *old_so, *new_so;
4394 	struct sctp_queued_to_read *control, *nctl;
4395 	struct sctp_readhead tmp_queue;
4396 	struct mbuf *m;
4397 	int error = 0;
4398 
4399 	old_so = old_inp->sctp_socket;
4400 	new_so = new_inp->sctp_socket;
4401 	TAILQ_INIT(&tmp_queue);
4402 	error = sblock(&old_so->so_rcv, waitflags);
4403 	if (error) {
4404 		/*
4405 		 * Gak, can't get sblock, we have a problem. Data will be
4406 		 * left stranded.. and we don't dare look at it since the
4407 		 * other thread may be reading something. Oh well, it's a
4408 		 * screwed up app that does a peeloff OR an accept while
4409 		 * reading from the main socket... actually it's only the
4410 		 * peeloff() case, since I think read will fail on a
4411 		 * listening socket..
4412 		 */
4413 		return;
4414 	}
4415 	/* lock the socket buffers */
4416 	SCTP_INP_READ_LOCK(old_inp);
4417 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4418 		/* Pull off all for our target stcb */
4419 		if (control->stcb == stcb) {
4420 			/* remove it; we want it */
4421 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4422 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4423 			m = control->data;
4424 			while (m) {
4425 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4426 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4427 				}
4428 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4429 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4430 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4431 				}
4432 				m = SCTP_BUF_NEXT(m);
4433 			}
4434 		}
4435 	}
4436 	SCTP_INP_READ_UNLOCK(old_inp);
4437 	/* Remove the sb-lock on the old socket */
4438 
4439 	sbunlock(&old_so->so_rcv);
4440 	/* Now we move them over to the new socket buffer */
4441 	SCTP_INP_READ_LOCK(new_inp);
4442 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4443 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4444 		m = control->data;
4445 		while (m) {
4446 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4447 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4448 			}
4449 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4450 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4451 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4452 			}
4453 			m = SCTP_BUF_NEXT(m);
4454 		}
4455 	}
4456 	SCTP_INP_READ_UNLOCK(new_inp);
4457 }
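
/*
 * A minimal sketch of the userland operation that typically ends up in the
 * routine above (assuming a one-to-many style socket "fd" and an association
 * id "aid"; not code from this file): sctp_peeloff(2) branches an
 * association off to its own socket, and the kernel then migrates any queued
 * read-queue entries for that association from the old socket buffer to the
 * new one, as done above.
 *
 *	int afd;
 *
 *	afd = sctp_peeloff(fd, aid);
 *	if (afd < 0)
 *		err(1, "sctp_peeloff");
 *
 * Reads on afd now return data for that association only.
 */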
4458 
4459 void
4460 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4461     struct sctp_tcb *stcb,
4462     int so_locked
4463 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4464     SCTP_UNUSED
4465 #endif
4466 )
4467 {
4468 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4469 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4470 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4471 		} else {
4472 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4473 			struct socket *so;
4474 
4475 			so = SCTP_INP_SO(inp);
4476 			if (!so_locked) {
4477 				if (stcb) {
4478 					atomic_add_int(&stcb->asoc.refcnt, 1);
4479 					SCTP_TCB_UNLOCK(stcb);
4480 				}
4481 				SCTP_SOCKET_LOCK(so, 1);
4482 				if (stcb) {
4483 					SCTP_TCB_LOCK(stcb);
4484 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4485 				}
4486 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4487 					SCTP_SOCKET_UNLOCK(so, 1);
4488 					return;
4489 				}
4490 			}
4491 #endif
4492 			sctp_sorwakeup(inp, inp->sctp_socket);
4493 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4494 			if (!so_locked) {
4495 				SCTP_SOCKET_UNLOCK(so, 1);
4496 			}
4497 #endif
4498 		}
4499 	}
4500 }
4501 
4502 void
4503 sctp_add_to_readq(struct sctp_inpcb *inp,
4504     struct sctp_tcb *stcb,
4505     struct sctp_queued_to_read *control,
4506     struct sockbuf *sb,
4507     int end,
4508     int inp_read_lock_held,
4509     int so_locked
4510 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4511     SCTP_UNUSED
4512 #endif
4513 )
4514 {
4515 	/*
4516 	 * Here we must place the control on the end of the socket read
4517 	 * queue AND increment sb_cc so that select will work properly on
4518 	 * read.
4519 	 */
4520 	struct mbuf *m, *prev = NULL;
4521 
4522 	if (inp == NULL) {
4523 		/* Gak, TSNH!! */
4524 #ifdef INVARIANTS
4525 		panic("Gak, inp NULL on add_to_readq");
4526 #endif
4527 		return;
4528 	}
4529 	if (inp_read_lock_held == 0)
4530 		SCTP_INP_READ_LOCK(inp);
4531 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4532 		sctp_free_remote_addr(control->whoFrom);
4533 		if (control->data) {
4534 			sctp_m_freem(control->data);
4535 			control->data = NULL;
4536 		}
4537 		sctp_free_a_readq(stcb, control);
4538 		if (inp_read_lock_held == 0)
4539 			SCTP_INP_READ_UNLOCK(inp);
4540 		return;
4541 	}
4542 	if (!(control->spec_flags & M_NOTIFICATION)) {
4543 		atomic_add_int(&inp->total_recvs, 1);
4544 		if (!control->do_not_ref_stcb) {
4545 			atomic_add_int(&stcb->total_recvs, 1);
4546 		}
4547 	}
4548 	m = control->data;
4549 	control->held_length = 0;
4550 	control->length = 0;
4551 	while (m) {
4552 		if (SCTP_BUF_LEN(m) == 0) {
4553 			/* Skip mbufs with NO length */
4554 			if (prev == NULL) {
4555 				/* First one */
4556 				control->data = sctp_m_free(m);
4557 				m = control->data;
4558 			} else {
4559 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4560 				m = SCTP_BUF_NEXT(prev);
4561 			}
4562 			if (m == NULL) {
4563 				control->tail_mbuf = prev;
4564 			}
4565 			continue;
4566 		}
4567 		prev = m;
4568 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4569 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4570 		}
4571 		sctp_sballoc(stcb, sb, m);
4572 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4573 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4574 		}
4575 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4576 		m = SCTP_BUF_NEXT(m);
4577 	}
4578 	if (prev != NULL) {
4579 		control->tail_mbuf = prev;
4580 	} else {
4581 		/* Everything got collapsed out?? */
4582 		sctp_free_remote_addr(control->whoFrom);
4583 		sctp_free_a_readq(stcb, control);
4584 		if (inp_read_lock_held == 0)
4585 			SCTP_INP_READ_UNLOCK(inp);
4586 		return;
4587 	}
4588 	if (end) {
4589 		control->end_added = 1;
4590 	}
4591 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4592 	control->on_read_q = 1;
4593 	if (inp_read_lock_held == 0)
4594 		SCTP_INP_READ_UNLOCK(inp);
4595 	if (inp && inp->sctp_socket) {
4596 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
4597 	}
4598 }
4599 
4600 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4601  *************ALTERNATE ROUTING CODE
4602  */
4603 
4604 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4605  *************ALTERNATE ROUTING CODE
4606  */
4607 
4608 struct mbuf *
4609 sctp_generate_cause(uint16_t code, char *info)
4610 {
4611 	struct mbuf *m;
4612 	struct sctp_gen_error_cause *cause;
4613 	size_t info_len;
4614 	uint16_t len;
4615 
4616 	if ((code == 0) || (info == NULL)) {
4617 		return (NULL);
4618 	}
4619 	info_len = strlen(info);
4620 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4621 		return (NULL);
4622 	}
4623 	len = (uint16_t) (sizeof(struct sctp_paramhdr) + info_len);
4624 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4625 	if (m != NULL) {
4626 		SCTP_BUF_LEN(m) = len;
4627 		cause = mtod(m, struct sctp_gen_error_cause *);
4628 		cause->code = htons(code);
4629 		cause->length = htons(len);
4630 		memcpy(cause->info, info, info_len);
4631 	}
4632 	return (m);
4633 }
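
/*
 * A minimal in-kernel usage sketch (an assumption modeled on how similar call
 * sites in the SCTP stack look, not copied from this file): callers build an
 * operational error cause and hand it to an abort path, which consumes the
 * mbuf; callers typically tolerate a NULL return by sending the ABORT without
 * a cause:
 *
 *	char msg[SCTP_DIAG_INFO_LEN];
 *	struct mbuf *op_err;
 *
 *	snprintf(msg, sizeof(msg), "example diagnostic");
 *	op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
 *	sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
 */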
4634 
4635 struct mbuf *
4636 sctp_generate_no_user_data_cause(uint32_t tsn)
4637 {
4638 	struct mbuf *m;
4639 	struct sctp_error_no_user_data *no_user_data_cause;
4640 	uint16_t len;
4641 
4642 	len = (uint16_t) sizeof(struct sctp_error_no_user_data);
4643 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4644 	if (m != NULL) {
4645 		SCTP_BUF_LEN(m) = len;
4646 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4647 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4648 		no_user_data_cause->cause.length = htons(len);
4649 		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4650 	}
4651 	return (m);
4652 }
4653 
4654 #ifdef SCTP_MBCNT_LOGGING
4655 void
4656 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4657     struct sctp_tmit_chunk *tp1, int chk_cnt)
4658 {
4659 	if (tp1->data == NULL) {
4660 		return;
4661 	}
4662 	asoc->chunks_on_out_queue -= chk_cnt;
4663 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4664 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4665 		    asoc->total_output_queue_size,
4666 		    tp1->book_size,
4667 		    0,
4668 		    tp1->mbcnt);
4669 	}
4670 	if (asoc->total_output_queue_size >= tp1->book_size) {
4671 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4672 	} else {
4673 		asoc->total_output_queue_size = 0;
4674 	}
4675 
4676 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4677 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4678 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4679 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4680 		} else {
4681 			stcb->sctp_socket->so_snd.sb_cc = 0;
4682 
4683 		}
4684 	}
4685 }
4686 
4687 #endif
4688 
4689 int
4690 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4691     uint8_t sent, int so_locked
4692 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4693     SCTP_UNUSED
4694 #endif
4695 )
4696 {
4697 	struct sctp_stream_out *strq;
4698 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4699 	struct sctp_stream_queue_pending *sp;
4700 	uint16_t stream = 0, seq = 0;
4701 	uint8_t foundeom = 0;
4702 	int ret_sz = 0;
4703 	int notdone;
4704 	int do_wakeup_routine = 0;
4705 
4706 	stream = tp1->rec.data.stream_number;
4707 	seq = tp1->rec.data.stream_seq;
4708 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4709 		stcb->asoc.abandoned_sent[0]++;
4710 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4711 		stcb->asoc.strmout[stream].abandoned_sent[0]++;
4712 #if defined(SCTP_DETAILED_STR_STATS)
4713 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4714 #endif
4715 	} else {
4716 		stcb->asoc.abandoned_unsent[0]++;
4717 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4718 		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
4719 #if defined(SCTP_DETAILED_STR_STATS)
4720 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4721 #endif
4722 	}
4723 	do {
4724 		ret_sz += tp1->book_size;
4725 		if (tp1->data != NULL) {
4726 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4727 				sctp_flight_size_decrease(tp1);
4728 				sctp_total_flight_decrease(stcb, tp1);
4729 			}
4730 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4731 			stcb->asoc.peers_rwnd += tp1->send_size;
4732 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4733 			if (sent) {
4734 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4735 			} else {
4736 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4737 			}
4738 			if (tp1->data) {
4739 				sctp_m_freem(tp1->data);
4740 				tp1->data = NULL;
4741 			}
4742 			do_wakeup_routine = 1;
4743 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4744 				stcb->asoc.sent_queue_cnt_removeable--;
4745 			}
4746 		}
4747 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4748 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4749 		    SCTP_DATA_NOT_FRAG) {
4750 			/* not frag'ed, we are done */
4751 			notdone = 0;
4752 			foundeom = 1;
4753 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4754 			/* end of frag, we are done */
4755 			notdone = 0;
4756 			foundeom = 1;
4757 		} else {
4758 			/*
4759 			 * Its a begin or middle piece, we must mark all of
4760 			 * it
4761 			 */
4762 			notdone = 1;
4763 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4764 		}
4765 	} while (tp1 && notdone);
4766 	if (foundeom == 0) {
4767 		/*
4768 		 * The multi-part message was scattered across the send and
4769 		 * sent queue.
4770 		 */
4771 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4772 			if ((tp1->rec.data.stream_number != stream) ||
4773 			    (tp1->rec.data.stream_seq != seq)) {
4774 				break;
4775 			}
4776 			/*
4777 			 * Save to chk in case we have some on the stream out
4778 			 * queue. If so, and we have an un-transmitted one, we
4779 			 * don't have to fudge the TSN.
4780 			 */
4781 			chk = tp1;
4782 			ret_sz += tp1->book_size;
4783 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4784 			if (sent) {
4785 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4786 			} else {
4787 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4788 			}
4789 			if (tp1->data) {
4790 				sctp_m_freem(tp1->data);
4791 				tp1->data = NULL;
4792 			}
4793 			/* No flight involved here; book the size to 0 */
4794 			tp1->book_size = 0;
4795 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4796 				foundeom = 1;
4797 			}
4798 			do_wakeup_routine = 1;
4799 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4800 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4801 			/*
4802 			 * Move it on to the sent queue so we can wait for it
4803 			 * to be passed by.
4804 			 */
4805 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4806 			    sctp_next);
4807 			stcb->asoc.send_queue_cnt--;
4808 			stcb->asoc.sent_queue_cnt++;
4809 		}
4810 	}
4811 	if (foundeom == 0) {
4812 		/*
4813 		 * Still no eom found. That means there is stuff left on the
4814 		 * stream out queue.. yuck.
4815 		 */
4816 		SCTP_TCB_SEND_LOCK(stcb);
4817 		strq = &stcb->asoc.strmout[stream];
4818 		sp = TAILQ_FIRST(&strq->outqueue);
4819 		if (sp != NULL) {
4820 			sp->discard_rest = 1;
4821 			/*
4822 			 * We may need to put a chunk on the queue that
4823 			 * holds the TSN that would have been sent with the
4824 			 * LAST bit.
4825 			 */
4826 			if (chk == NULL) {
4827 				/* Yep, we have to */
4828 				sctp_alloc_a_chunk(stcb, chk);
4829 				if (chk == NULL) {
4830 					/*
4831 					 * we are hosed. All we can do is
4832 					 * nothing.. which will cause an
4833 					 * abort if the peer is paying
4834 					 * attention.
4835 					 */
4836 					goto oh_well;
4837 				}
4838 				memset(chk, 0, sizeof(*chk));
4839 				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4840 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4841 				chk->asoc = &stcb->asoc;
4842 				chk->rec.data.stream_seq = strq->next_sequence_send;
4843 				chk->rec.data.stream_number = sp->stream;
4844 				chk->rec.data.payloadtype = sp->ppid;
4845 				chk->rec.data.context = sp->context;
4846 				chk->flags = sp->act_flags;
4847 				chk->whoTo = NULL;
4848 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4849 				strq->chunks_on_queues++;
4850 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4851 				stcb->asoc.sent_queue_cnt++;
4852 				stcb->asoc.pr_sctp_cnt++;
4853 			} else {
4854 				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4855 			}
4856 			strq->next_sequence_send++;
4857 	oh_well:
4858 			if (sp->data) {
4859 				/*
4860 				 * Pull any data to free up the SB and allow
4861 				 * the sender to "add more" while we throw it
4862 				 * away :-)
4863 				 */
4864 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4865 				ret_sz += sp->length;
4866 				do_wakeup_routine = 1;
4867 				sp->some_taken = 1;
4868 				sctp_m_freem(sp->data);
4869 				sp->data = NULL;
4870 				sp->tail_mbuf = NULL;
4871 				sp->length = 0;
4872 			}
4873 		}
4874 		SCTP_TCB_SEND_UNLOCK(stcb);
4875 	}
4876 	if (do_wakeup_routine) {
4877 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4878 		struct socket *so;
4879 
4880 		so = SCTP_INP_SO(stcb->sctp_ep);
4881 		if (!so_locked) {
4882 			atomic_add_int(&stcb->asoc.refcnt, 1);
4883 			SCTP_TCB_UNLOCK(stcb);
4884 			SCTP_SOCKET_LOCK(so, 1);
4885 			SCTP_TCB_LOCK(stcb);
4886 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4887 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4888 				/* assoc was freed while we were unlocked */
4889 				SCTP_SOCKET_UNLOCK(so, 1);
4890 				return (ret_sz);
4891 			}
4892 		}
4893 #endif
4894 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4895 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4896 		if (!so_locked) {
4897 			SCTP_SOCKET_UNLOCK(so, 1);
4898 		}
4899 #endif
4900 	}
4901 	return (ret_sz);
4902 }
4903 
4904 /*
4905  * Checks to see if the given address, sa, is one that is currently known by
4906  * the kernel.  Note: can't distinguish the same address on multiple interfaces
4907  * and doesn't handle multiple addresses with different zone/scope id's.  Note:
4908  * ifa_ifwithaddr() compares the entire sockaddr struct.
4909  */
4910 struct sctp_ifa *
4911 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4912     int holds_lock)
4913 {
4914 	struct sctp_laddr *laddr;
4915 
4916 	if (holds_lock == 0) {
4917 		SCTP_INP_RLOCK(inp);
4918 	}
4919 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4920 		if (laddr->ifa == NULL)
4921 			continue;
4922 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4923 			continue;
4924 #ifdef INET
4925 		if (addr->sa_family == AF_INET) {
4926 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4927 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4928 				/* found him. */
4929 				if (holds_lock == 0) {
4930 					SCTP_INP_RUNLOCK(inp);
4931 				}
4932 				return (laddr->ifa);
4933 				break;
4934 			}
4935 		}
4936 #endif
4937 #ifdef INET6
4938 		if (addr->sa_family == AF_INET6) {
4939 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4940 			    &laddr->ifa->address.sin6)) {
4941 				/* found him. */
4942 				if (holds_lock == 0) {
4943 					SCTP_INP_RUNLOCK(inp);
4944 				}
4945 				return (laddr->ifa);
4946 				break;
4947 			}
4948 		}
4949 #endif
4950 	}
4951 	if (holds_lock == 0) {
4952 		SCTP_INP_RUNLOCK(inp);
4953 	}
4954 	return (NULL);
4955 }
4956 
4957 uint32_t
4958 sctp_get_ifa_hash_val(struct sockaddr *addr)
4959 {
4960 	switch (addr->sa_family) {
4961 #ifdef INET
4962 	case AF_INET:
4963 		{
4964 			struct sockaddr_in *sin;
4965 
4966 			sin = (struct sockaddr_in *)addr;
4967 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4968 		}
4969 #endif
4970 #ifdef INET6
4971 	case AF_INET6:
4972 		{
4973 			struct sockaddr_in6 *sin6;
4974 			uint32_t hash_of_addr;
4975 
4976 			sin6 = (struct sockaddr_in6 *)addr;
4977 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4978 			    sin6->sin6_addr.s6_addr32[1] +
4979 			    sin6->sin6_addr.s6_addr32[2] +
4980 			    sin6->sin6_addr.s6_addr32[3]);
4981 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4982 			return (hash_of_addr);
4983 		}
4984 #endif
4985 	default:
4986 		break;
4987 	}
4988 	return (0);
4989 }
4990 
4991 struct sctp_ifa *
4992 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4993 {
4994 	struct sctp_ifa *sctp_ifap;
4995 	struct sctp_vrf *vrf;
4996 	struct sctp_ifalist *hash_head;
4997 	uint32_t hash_of_addr;
4998 
4999 	if (holds_lock == 0)
5000 		SCTP_IPI_ADDR_RLOCK();
5001 
5002 	vrf = sctp_find_vrf(vrf_id);
5003 	if (vrf == NULL) {
5004 		if (holds_lock == 0)
5005 			SCTP_IPI_ADDR_RUNLOCK();
5006 		return (NULL);
5007 	}
5008 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5009 
5010 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5011 	if (hash_head == NULL) {
5012 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5013 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5014 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5015 		sctp_print_address(addr);
5016 		SCTP_PRINTF("No such bucket for address\n");
5017 		if (holds_lock == 0)
5018 			SCTP_IPI_ADDR_RUNLOCK();
5019 
5020 		return (NULL);
5021 	}
5022 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5023 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5024 			continue;
5025 #ifdef INET
5026 		if (addr->sa_family == AF_INET) {
5027 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5028 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5029 				/* found him. */
5030 				if (holds_lock == 0)
5031 					SCTP_IPI_ADDR_RUNLOCK();
5032 				return (sctp_ifap);
5033 				break;
5034 			}
5035 		}
5036 #endif
5037 #ifdef INET6
5038 		if (addr->sa_family == AF_INET6) {
5039 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5040 			    &sctp_ifap->address.sin6)) {
5041 				/* found him. */
5042 				if (holds_lock == 0)
5043 					SCTP_IPI_ADDR_RUNLOCK();
5044 				return (sctp_ifap);
5045 				break;
5046 			}
5047 		}
5048 #endif
5049 	}
5050 	if (holds_lock == 0)
5051 		SCTP_IPI_ADDR_RUNLOCK();
5052 	return (NULL);
5053 }
5054 
5055 static void
5056 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
5057     uint32_t rwnd_req)
5058 {
5059 	/* User pulled some data, do we need a rwnd update? */
5060 	int r_unlocked = 0;
5061 	uint32_t dif, rwnd;
5062 	struct socket *so = NULL;
5063 
5064 	if (stcb == NULL)
5065 		return;
5066 
5067 	atomic_add_int(&stcb->asoc.refcnt, 1);
5068 
5069 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5070 	    SCTP_STATE_SHUTDOWN_RECEIVED |
5071 	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5072 		/* Pre-check: if we are freeing, no update */
5073 		goto no_lock;
5074 	}
5075 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5076 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5077 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5078 		goto out;
5079 	}
5080 	so = stcb->sctp_socket;
5081 	if (so == NULL) {
5082 		goto out;
5083 	}
5084 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5085 	/* Have you freed enough to warrant a look? */
5086 	*freed_so_far = 0;
5087 	/* Yep, it's worth a look and the lock overhead */
5088 
5089 	/* Figure out what the rwnd would be */
5090 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5091 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5092 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5093 	} else {
5094 		dif = 0;
5095 	}
5096 	if (dif >= rwnd_req) {
5097 		if (hold_rlock) {
5098 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5099 			r_unlocked = 1;
5100 		}
5101 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5102 			/*
5103 			 * One last check before we allow the guy possibly
5104 			 * to get in. There is a race where the guy has not
5105 			 * reached the gate; in that case we just bail.
5106 			 */
5107 			goto out;
5108 		}
5109 		SCTP_TCB_LOCK(stcb);
5110 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5111 			/* No reports here */
5112 			SCTP_TCB_UNLOCK(stcb);
5113 			goto out;
5114 		}
5115 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5116 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5117 
5118 		sctp_chunk_output(stcb->sctp_ep, stcb,
5119 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5120 		/* make sure no timer is running */
5121 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5122 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5123 		SCTP_TCB_UNLOCK(stcb);
5124 	} else {
5125 		/* Update how much we have pending */
5126 		stcb->freed_by_sorcv_sincelast = dif;
5127 	}
5128 out:
5129 	if (so && r_unlocked && hold_rlock) {
5130 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5131 	}
5132 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5133 no_lock:
5134 	atomic_add_int(&stcb->asoc.refcnt, -1);
5135 	return;
5136 }
5137 
5138 int
5139 sctp_sorecvmsg(struct socket *so,
5140     struct uio *uio,
5141     struct mbuf **mp,
5142     struct sockaddr *from,
5143     int fromlen,
5144     int *msg_flags,
5145     struct sctp_sndrcvinfo *sinfo,
5146     int filling_sinfo)
5147 {
5148 	/*
5149 	 * MSG flags we will look at: MSG_DONTWAIT - non-blocking IO.
5150 	 * MSG_PEEK - Look, don't touch :-D (only valid withOUT an mbuf copy,
5151 	 * i.e. mp == NULL, so uio is the copy method to userland). MSG_WAITALL - ??
5152 	 * On the way out we may send out any combination of:
5153 	 * MSG_NOTIFICATION and MSG_EOR.
5154 	 *
5155 	 */
5156 	struct sctp_inpcb *inp = NULL;
5157 	int my_len = 0;
5158 	int cp_len = 0, error = 0;
5159 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5160 	struct mbuf *m = NULL;
5161 	struct sctp_tcb *stcb = NULL;
5162 	int wakeup_read_socket = 0;
5163 	int freecnt_applied = 0;
5164 	int out_flags = 0, in_flags = 0;
5165 	int block_allowed = 1;
5166 	uint32_t freed_so_far = 0;
5167 	uint32_t copied_so_far = 0;
5168 	int in_eeor_mode = 0;
5169 	int no_rcv_needed = 0;
5170 	uint32_t rwnd_req = 0;
5171 	int hold_sblock = 0;
5172 	int hold_rlock = 0;
5173 	ssize_t slen = 0;
5174 	uint32_t held_length = 0;
5175 	int sockbuf_lock = 0;
5176 
5177 	if (uio == NULL) {
5178 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5179 		return (EINVAL);
5180 	}
5181 	if (msg_flags) {
5182 		in_flags = *msg_flags;
5183 		if (in_flags & MSG_PEEK)
5184 			SCTP_STAT_INCR(sctps_read_peeks);
5185 	} else {
5186 		in_flags = 0;
5187 	}
5188 	slen = uio->uio_resid;
5189 
5190 	/* Pull in and set up our int flags */
5191 	if (in_flags & MSG_OOB) {
5192 		/* Out of band's NOT supported */
5193 		return (EOPNOTSUPP);
5194 	}
5195 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5196 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5197 		return (EINVAL);
5198 	}
5199 	if ((in_flags & (MSG_DONTWAIT
5200 	    | MSG_NBIO
5201 	    )) ||
5202 	    SCTP_SO_IS_NBIO(so)) {
5203 		block_allowed = 0;
5204 	}
5205 	/* setup the endpoint */
5206 	inp = (struct sctp_inpcb *)so->so_pcb;
5207 	if (inp == NULL) {
5208 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5209 		return (EFAULT);
5210 	}
5211 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5212 	/* Must be at least an MTU's worth */
5213 	if (rwnd_req < SCTP_MIN_RWND)
5214 		rwnd_req = SCTP_MIN_RWND;
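	/*
	 * rwnd_req is how much receive buffer space the reader must free
	 * before it is worth taking the locks in sctp_user_rcvd() to
	 * advertise the newly opened window: the socket's receive buffer
	 * limit shifted down by SCTP_RWND_HIWAT_SHIFT, but never less
	 * than SCTP_MIN_RWND (an MTU's worth).
	 */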
5215 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5216 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5217 		sctp_misc_ints(SCTP_SORECV_ENTER,
5218 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t) uio->uio_resid);
5219 	}
5220 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5221 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5222 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t) uio->uio_resid);
5223 	}
5224 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5225 	if (error) {
5226 		goto release_unlocked;
5227 	}
5228 	sockbuf_lock = 1;
5229 restart:
5230 
5231 
5232 restart_nosblocks:
5233 	if (hold_sblock == 0) {
5234 		SOCKBUF_LOCK(&so->so_rcv);
5235 		hold_sblock = 1;
5236 	}
5237 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5238 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5239 		goto out;
5240 	}
5241 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5242 		if (so->so_error) {
5243 			error = so->so_error;
5244 			if ((in_flags & MSG_PEEK) == 0)
5245 				so->so_error = 0;
5246 			goto out;
5247 		} else {
5248 			if (so->so_rcv.sb_cc == 0) {
5249 				/* indicate EOF */
5250 				error = 0;
5251 				goto out;
5252 			}
5253 		}
5254 	}
5255 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5256 		/* we need to wait for data */
5257 		if ((so->so_rcv.sb_cc == 0) &&
5258 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5259 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5260 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5261 				/*
5262 				 * For active open side clear flags for
5263 				 * re-use passive open is blocked by
5264 				 * connect.
5265 				 */
5266 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5267 					/*
5268 					 * You were aborted, passive side
5269 					 * always hits here
5270 					 */
5271 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5272 					error = ECONNRESET;
5273 				}
5274 				so->so_state &= ~(SS_ISCONNECTING |
5275 				    SS_ISDISCONNECTING |
5276 				    SS_ISCONFIRMING |
5277 				    SS_ISCONNECTED);
5278 				if (error == 0) {
5279 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5280 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5281 						error = ENOTCONN;
5282 					}
5283 				}
5284 				goto out;
5285 			}
5286 		}
5287 		error = sbwait(&so->so_rcv);
5288 		if (error) {
5289 			goto out;
5290 		}
5291 		held_length = 0;
5292 		goto restart_nosblocks;
5293 	} else if (so->so_rcv.sb_cc == 0) {
5294 		if (so->so_error) {
5295 			error = so->so_error;
5296 			if ((in_flags & MSG_PEEK) == 0)
5297 				so->so_error = 0;
5298 		} else {
5299 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5300 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5301 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5302 					/*
5303 					 * For active open side clear flags
5304 					 * for re-use passive open is
5305 					 * blocked by connect.
5306 					 */
5307 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5308 						/*
5309 						 * You were aborted, passive
5310 						 * side always hits here
5311 						 */
5312 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5313 						error = ECONNRESET;
5314 					}
5315 					so->so_state &= ~(SS_ISCONNECTING |
5316 					    SS_ISDISCONNECTING |
5317 					    SS_ISCONFIRMING |
5318 					    SS_ISCONNECTED);
5319 					if (error == 0) {
5320 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5321 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5322 							error = ENOTCONN;
5323 						}
5324 					}
5325 					goto out;
5326 				}
5327 			}
5328 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5329 			error = EWOULDBLOCK;
5330 		}
5331 		goto out;
5332 	}
5333 	if (hold_sblock == 1) {
5334 		SOCKBUF_UNLOCK(&so->so_rcv);
5335 		hold_sblock = 0;
5336 	}
5337 	/* we possibly have data we can read */
5338 	/* sa_ignore FREED_MEMORY */
5339 	control = TAILQ_FIRST(&inp->read_queue);
5340 	if (control == NULL) {
5341 		/*
5342 		 * This could be happening since the appender did the
5343 		 * increment but has not yet done the tailq insert onto the
5344 		 * read_queue
5345 		 */
5346 		if (hold_rlock == 0) {
5347 			SCTP_INP_READ_LOCK(inp);
5348 		}
5349 		control = TAILQ_FIRST(&inp->read_queue);
5350 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5351 #ifdef INVARIANTS
5352 			panic("Huh, its non zero and nothing on control?");
5353 #endif
5354 			so->so_rcv.sb_cc = 0;
5355 		}
5356 		SCTP_INP_READ_UNLOCK(inp);
5357 		hold_rlock = 0;
5358 		goto restart;
5359 	}
5360 	if ((control->length == 0) &&
5361 	    (control->do_not_ref_stcb)) {
5362 		/*
5363 		 * Clean up code for freeing assoc that left behind a
5364 		 * pdapi.. maybe a peer in EEOR that just closed after
5365 		 * sending and never indicated an EOR.
5366 		 */
5367 		if (hold_rlock == 0) {
5368 			hold_rlock = 1;
5369 			SCTP_INP_READ_LOCK(inp);
5370 		}
5371 		control->held_length = 0;
5372 		if (control->data) {
5373 			/* Hmm there is data here .. fix */
5374 			struct mbuf *m_tmp;
5375 			int cnt = 0;
5376 
5377 			m_tmp = control->data;
5378 			while (m_tmp) {
5379 				cnt += SCTP_BUF_LEN(m_tmp);
5380 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5381 					control->tail_mbuf = m_tmp;
5382 					control->end_added = 1;
5383 				}
5384 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5385 			}
5386 			control->length = cnt;
5387 		} else {
5388 			/* remove it */
5389 			TAILQ_REMOVE(&inp->read_queue, control, next);
5390 			/* Add back any hidden data */
5391 			sctp_free_remote_addr(control->whoFrom);
5392 			sctp_free_a_readq(stcb, control);
5393 		}
5394 		if (hold_rlock) {
5395 			hold_rlock = 0;
5396 			SCTP_INP_READ_UNLOCK(inp);
5397 		}
5398 		goto restart;
5399 	}
5400 	if ((control->length == 0) &&
5401 	    (control->end_added == 1)) {
5402 		/*
5403 		 * Do we also need to check for (control->pdapi_aborted ==
5404 		 * 1)?
5405 		 */
5406 		if (hold_rlock == 0) {
5407 			hold_rlock = 1;
5408 			SCTP_INP_READ_LOCK(inp);
5409 		}
5410 		TAILQ_REMOVE(&inp->read_queue, control, next);
5411 		if (control->data) {
5412 #ifdef INVARIANTS
5413 			panic("control->data not null but control->length == 0");
5414 #else
5415 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5416 			sctp_m_freem(control->data);
5417 			control->data = NULL;
5418 #endif
5419 		}
5420 		if (control->aux_data) {
5421 			sctp_m_free(control->aux_data);
5422 			control->aux_data = NULL;
5423 		}
5424 #ifdef INVARIANTS
5425 		if (control->on_strm_q) {
5426 			panic("About to free ctl:%p so:%p and its in %d",
5427 			    control, so, control->on_strm_q);
5428 		}
5429 #endif
5430 		sctp_free_remote_addr(control->whoFrom);
5431 		sctp_free_a_readq(stcb, control);
5432 		if (hold_rlock) {
5433 			hold_rlock = 0;
5434 			SCTP_INP_READ_UNLOCK(inp);
5435 		}
5436 		goto restart;
5437 	}
5438 	if (control->length == 0) {
5439 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5440 		    (filling_sinfo)) {
5441 			/* find a more suitable one than this */
5442 			ctl = TAILQ_NEXT(control, next);
5443 			while (ctl) {
5444 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5445 				    (ctl->some_taken ||
5446 				    (ctl->spec_flags & M_NOTIFICATION) ||
5447 				    ((ctl->do_not_ref_stcb == 0) &&
5448 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5449 				    ) {
5450 					/*-
5451 					 * If we have a different TCB next, and there is data
5452 					 * present. If we have already taken some (pdapi), OR we can
5453 					 * ref the tcb and no delivery has started on this stream, we
5454 					 * take it. Note we allow a notification on a different
5455 					 * assoc to be delivered..
5456 					 */
5457 					control = ctl;
5458 					goto found_one;
5459 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5460 					    (ctl->length) &&
5461 					    ((ctl->some_taken) ||
5462 					    ((ctl->do_not_ref_stcb == 0) &&
5463 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5464 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5465 					/*-
5466 					 * If we have the same tcb, and there is data present, and we
5467 					 * have the strm interleave feature present. Then if we have
5468 					 * taken some (pdapi) or we can refer to that tcb AND we have
5469 					 * not started a delivery for this stream, we can take it.
5470 					 * Note we do NOT allow a notification on the same assoc to
5471 					 * be delivered.
5472 					 */
5473 					control = ctl;
5474 					goto found_one;
5475 				}
5476 				ctl = TAILQ_NEXT(ctl, next);
5477 			}
5478 		}
5479 		/*
5480 		 * if we reach here, no suitable replacement is available
5481 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5482 		 * into our held count, and it's time to sleep again.
5483 		 */
5484 		held_length = so->so_rcv.sb_cc;
5485 		control->held_length = so->so_rcv.sb_cc;
5486 		goto restart;
5487 	}
5488 	/* Clear the held length since there is something to read */
5489 	control->held_length = 0;
5490 	if (hold_rlock) {
5491 		SCTP_INP_READ_UNLOCK(inp);
5492 		hold_rlock = 0;
5493 	}
5494 found_one:
5495 	/*
5496 	 * If we reach here, control has some data for us to read off.
5497 	 * Note that stcb COULD be NULL.
5498 	 */
5499 	control->some_taken++;
5500 	if (hold_sblock) {
5501 		SOCKBUF_UNLOCK(&so->so_rcv);
5502 		hold_sblock = 0;
5503 	}
5504 	stcb = control->stcb;
5505 	if (stcb) {
5506 		if ((control->do_not_ref_stcb == 0) &&
5507 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5508 			if (freecnt_applied == 0)
5509 				stcb = NULL;
5510 		} else if (control->do_not_ref_stcb == 0) {
5511 			/* you can't free it on me please */
5512 			/*
5513 			 * The lock on the socket buffer protects us so the
5514 			 * free code will stop. But since we used the
5515 			 * socketbuf lock and the sender uses the tcb_lock
5516 			 * to increment, we need to use the atomic add to
5517 			 * the refcnt
5518 			 */
5519 			if (freecnt_applied) {
5520 #ifdef INVARIANTS
5521 				panic("refcnt already incremented");
5522 #else
5523 				SCTP_PRINTF("refcnt already incremented?\n");
5524 #endif
5525 			} else {
5526 				atomic_add_int(&stcb->asoc.refcnt, 1);
5527 				freecnt_applied = 1;
5528 			}
5529 			/*
5530 			 * Setup to remember how much we have not yet told
5531 			 * the peer our rwnd has opened up. Note we grab the
5532 			 * value from the tcb from last time. Note too that
5533 			 * sack sending clears this when a sack is sent,
5534 			 * which is fine. Once we hit the rwnd_req, we then
5535 			 * will go to the sctp_user_rcvd() that will not
5536 			 * lock until it KNOWs it MUST send a WUP-SACK.
5537 			 */
5538 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5539 			stcb->freed_by_sorcv_sincelast = 0;
5540 		}
5541 	}
5542 	if (stcb &&
5543 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5544 	    control->do_not_ref_stcb == 0) {
5545 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5546 	}
5547 	/* First lets get off the sinfo and sockaddr info */
5548 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5549 		sinfo->sinfo_stream = control->sinfo_stream;
5550 		sinfo->sinfo_ssn = (uint16_t) control->sinfo_ssn;
5551 		sinfo->sinfo_flags = control->sinfo_flags;
5552 		sinfo->sinfo_ppid = control->sinfo_ppid;
5553 		sinfo->sinfo_context = control->sinfo_context;
5554 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5555 		sinfo->sinfo_tsn = control->sinfo_tsn;
5556 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5557 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5558 		nxt = TAILQ_NEXT(control, next);
5559 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5560 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5561 			struct sctp_extrcvinfo *s_extra;
5562 
5563 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5564 			if ((nxt) &&
5565 			    (nxt->length)) {
5566 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5567 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5568 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5569 				}
5570 				if (nxt->spec_flags & M_NOTIFICATION) {
5571 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5572 				}
5573 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5574 				s_extra->serinfo_next_length = nxt->length;
5575 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5576 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5577 				if (nxt->tail_mbuf != NULL) {
5578 					if (nxt->end_added) {
5579 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5580 					}
5581 				}
5582 			} else {
5583 				/*
5584 				 * we explicitly zero these, since the memcpy
5585 				 * may have picked up other things beyond the
5586 				 * older sinfo_ fields on the control structure
5587 				 * :-D
5588 				 */
5589 				nxt = NULL;
5590 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5591 				s_extra->serinfo_next_aid = 0;
5592 				s_extra->serinfo_next_length = 0;
5593 				s_extra->serinfo_next_ppid = 0;
5594 				s_extra->serinfo_next_stream = 0;
5595 			}
5596 		}
5597 		/*
5598 		 * update from the real current cum-ack, if we have an stcb.
5599 		 */
5600 		if ((control->do_not_ref_stcb == 0) && stcb)
5601 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5602 		/*
5603 		 * mask off the high bits, we keep the actual chunk bits in
5604 		 * there.
5605 		 */
5606 		sinfo->sinfo_flags &= 0x00ff;
5607 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5608 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5609 		}
5610 	}
5611 #ifdef SCTP_ASOCLOG_OF_TSNS
5612 	{
5613 		int index, newindex;
5614 		struct sctp_pcbtsn_rlog *entry;
5615 
5616 		do {
5617 			index = inp->readlog_index;
5618 			newindex = index + 1;
5619 			if (newindex >= SCTP_READ_LOG_SIZE) {
5620 				newindex = 0;
5621 			}
5622 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5623 		entry = &inp->readlog[index];
5624 		entry->vtag = control->sinfo_assoc_id;
5625 		entry->strm = control->sinfo_stream;
5626 		entry->seq = control->sinfo_ssn;
5627 		entry->sz = control->length;
5628 		entry->flgs = control->sinfo_flags;
5629 	}
5630 #endif
5631 	if ((fromlen > 0) && (from != NULL)) {
5632 		union sctp_sockstore store;
5633 		size_t len;
5634 
5635 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5636 #ifdef INET6
5637 		case AF_INET6:
5638 			len = sizeof(struct sockaddr_in6);
5639 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5640 			store.sin6.sin6_port = control->port_from;
5641 			break;
5642 #endif
5643 #ifdef INET
5644 		case AF_INET:
5645 #ifdef INET6
5646 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5647 				len = sizeof(struct sockaddr_in6);
5648 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5649 				    &store.sin6);
5650 				store.sin6.sin6_port = control->port_from;
5651 			} else {
5652 				len = sizeof(struct sockaddr_in);
5653 				store.sin = control->whoFrom->ro._l_addr.sin;
5654 				store.sin.sin_port = control->port_from;
5655 			}
5656 #else
5657 			len = sizeof(struct sockaddr_in);
5658 			store.sin = control->whoFrom->ro._l_addr.sin;
5659 			store.sin.sin_port = control->port_from;
5660 #endif
5661 			break;
5662 #endif
5663 		default:
5664 			len = 0;
5665 			break;
5666 		}
5667 		memcpy(from, &store, min((size_t)fromlen, len));
5668 #ifdef INET6
5669 		{
5670 			struct sockaddr_in6 lsa6, *from6;
5671 
5672 			from6 = (struct sockaddr_in6 *)from;
5673 			sctp_recover_scope_mac(from6, (&lsa6));
5674 		}
5675 #endif
5676 	}
5677 	/* now copy out what data we can */
5678 	if (mp == NULL) {
5679 		/* copy out each mbuf in the chain up to length */
5680 get_more_data:
5681 		m = control->data;
5682 		while (m) {
5683 			/* Move out all we can */
5684 			cp_len = (int)uio->uio_resid;
5685 			my_len = (int)SCTP_BUF_LEN(m);
5686 			if (cp_len > my_len) {
5687 				/* not enough in this buf */
5688 				cp_len = my_len;
5689 			}
5690 			if (hold_rlock) {
5691 				SCTP_INP_READ_UNLOCK(inp);
5692 				hold_rlock = 0;
5693 			}
5694 			if (cp_len > 0)
5695 				error = uiomove(mtod(m, char *), cp_len, uio);
5696 			/* re-read */
5697 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5698 				goto release;
5699 			}
5700 			if ((control->do_not_ref_stcb == 0) && stcb &&
5701 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5702 				no_rcv_needed = 1;
5703 			}
5704 			if (error) {
5705 				/* error we are out of here */
5706 				goto release;
5707 			}
5708 			SCTP_INP_READ_LOCK(inp);
5709 			hold_rlock = 1;
5710 			if (cp_len == SCTP_BUF_LEN(m)) {
5711 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5712 				    (control->end_added)) {
5713 					out_flags |= MSG_EOR;
5714 					if ((control->do_not_ref_stcb == 0) &&
5715 					    (control->stcb != NULL) &&
5716 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5717 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5718 				}
5719 				if (control->spec_flags & M_NOTIFICATION) {
5720 					out_flags |= MSG_NOTIFICATION;
5721 				}
5722 				/* we ate up the mbuf */
5723 				if (in_flags & MSG_PEEK) {
5724 					/* just looking */
5725 					m = SCTP_BUF_NEXT(m);
5726 					copied_so_far += cp_len;
5727 				} else {
5728 					/* dispose of the mbuf */
5729 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5730 						sctp_sblog(&so->so_rcv,
5731 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5732 					}
5733 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5734 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5735 						sctp_sblog(&so->so_rcv,
5736 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5737 					}
5738 					copied_so_far += cp_len;
5739 					freed_so_far += cp_len;
5740 					freed_so_far += MSIZE;
5741 					atomic_subtract_int(&control->length, cp_len);
5742 					control->data = sctp_m_free(m);
5743 					m = control->data;
5744 					/*
5745 					 * been through it all; we must hold the
5746 					 * sb lock, so it is ok to null the tail
5747 					 */
5748 					if (control->data == NULL) {
5749 #ifdef INVARIANTS
5750 						if ((control->end_added == 0) ||
5751 						    (TAILQ_NEXT(control, next) == NULL)) {
5752 							/*
5753 							 * If the end is not
5754 							 * added, OR the
5755 							 * next is NOT null
5756 							 * we MUST have the
5757 							 * lock.
5758 							 */
5759 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5760 								panic("Hmm we don't own the lock?");
5761 							}
5762 						}
5763 #endif
5764 						control->tail_mbuf = NULL;
5765 #ifdef INVARIANTS
5766 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5767 							panic("end_added, nothing left and no MSG_EOR");
5768 						}
5769 #endif
5770 					}
5771 				}
5772 			} else {
5773 				/* Do we need to trim the mbuf? */
5774 				if (control->spec_flags & M_NOTIFICATION) {
5775 					out_flags |= MSG_NOTIFICATION;
5776 				}
5777 				if ((in_flags & MSG_PEEK) == 0) {
5778 					SCTP_BUF_RESV_UF(m, cp_len);
5779 					SCTP_BUF_LEN(m) -= cp_len;
5780 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5781 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5782 					}
5783 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5784 					if ((control->do_not_ref_stcb == 0) &&
5785 					    stcb) {
5786 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5787 					}
5788 					copied_so_far += cp_len;
5789 					freed_so_far += cp_len;
5790 					freed_so_far += MSIZE;
5791 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5792 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5793 						    SCTP_LOG_SBRESULT, 0);
5794 					}
5795 					atomic_subtract_int(&control->length, cp_len);
5796 				} else {
5797 					copied_so_far += cp_len;
5798 				}
5799 			}
5800 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5801 				break;
5802 			}
5803 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5804 			    (control->do_not_ref_stcb == 0) &&
5805 			    (freed_so_far >= rwnd_req)) {
5806 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5807 			}
5808 		}		/* end while(m) */
5809 		/*
5810 		 * At this point we have looked at it all and we either have
5811 		 * a MSG_EOR, read all the user wants... <OR>
5812 		 * control->length == 0.
5813 		 */
5814 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5815 			/* we are done with this control */
5816 			if (control->length == 0) {
5817 				if (control->data) {
5818 #ifdef INVARIANTS
5819 					panic("control->data not null at read eor?");
5820 #else
5821 					SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n");
5822 					sctp_m_freem(control->data);
5823 					control->data = NULL;
5824 #endif
5825 				}
5826 		done_with_control:
5827 				if (hold_rlock == 0) {
5828 					SCTP_INP_READ_LOCK(inp);
5829 					hold_rlock = 1;
5830 				}
5831 				TAILQ_REMOVE(&inp->read_queue, control, next);
5832 				/* Add back any hidden data */
5833 				if (control->held_length) {
5834 					held_length = 0;
5835 					control->held_length = 0;
5836 					wakeup_read_socket = 1;
5837 				}
5838 				if (control->aux_data) {
5839 					sctp_m_free(control->aux_data);
5840 					control->aux_data = NULL;
5841 				}
5842 				no_rcv_needed = control->do_not_ref_stcb;
5843 				sctp_free_remote_addr(control->whoFrom);
5844 				control->data = NULL;
5845 #ifdef INVARIANTS
5846 				if (control->on_strm_q) {
5847 					panic("About to free ctl:%p so:%p and its in %d",
5848 					    control, so, control->on_strm_q);
5849 				}
5850 #endif
5851 				sctp_free_a_readq(stcb, control);
5852 				control = NULL;
5853 				if ((freed_so_far >= rwnd_req) &&
5854 				    (no_rcv_needed == 0))
5855 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5856 
5857 			} else {
5858 				/*
5859 				 * The user did not read all of this
5860 				 * message, turn off the returned MSG_EOR
5861 				 * since we are leaving more behind on the
5862 				 * control to read.
5863 				 */
5864 #ifdef INVARIANTS
5865 				if (control->end_added &&
5866 				    (control->data == NULL) &&
5867 				    (control->tail_mbuf == NULL)) {
5868 					panic("Gak, control->length is corrupt?");
5869 				}
5870 #endif
5871 				no_rcv_needed = control->do_not_ref_stcb;
5872 				out_flags &= ~MSG_EOR;
5873 			}
5874 		}
5875 		if (out_flags & MSG_EOR) {
5876 			goto release;
5877 		}
5878 		if ((uio->uio_resid == 0) ||
5879 		    ((in_eeor_mode) &&
5880 		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5881 			goto release;
5882 		}
5883 		/*
5884 		 * If I hit here the receiver wants more and this message is
5885 		 * NOT done (pd-api). So two questions. Can we block? if not
5886 		 * we are done. Did the user NOT set MSG_WAITALL?
5887 		 */
5888 		if (block_allowed == 0) {
5889 			goto release;
5890 		}
5891 		/*
5892 		 * We need to wait for more data. A few things to note:
5893 		 * - We don't sbunlock() so we don't get someone else reading.
5894 		 * - We must be sure to account for the case where what is
5895 		 *   added is NOT to our control when we wake up.
5896 		 */
5897 
5898 		/*
5899 		 * Do we need to tell the transport a rwnd update might be
5900 		 * needed before we go to sleep?
5901 		 */
5902 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5903 		    ((freed_so_far >= rwnd_req) &&
5904 		    (control->do_not_ref_stcb == 0) &&
5905 		    (no_rcv_needed == 0))) {
5906 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5907 		}
5908 wait_some_more:
5909 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5910 			goto release;
5911 		}
5912 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5913 			goto release;
5914 
5915 		if (hold_rlock == 1) {
5916 			SCTP_INP_READ_UNLOCK(inp);
5917 			hold_rlock = 0;
5918 		}
5919 		if (hold_sblock == 0) {
5920 			SOCKBUF_LOCK(&so->so_rcv);
5921 			hold_sblock = 1;
5922 		}
5923 		if ((copied_so_far) && (control->length == 0) &&
5924 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5925 			goto release;
5926 		}
5927 		if (so->so_rcv.sb_cc <= control->held_length) {
5928 			error = sbwait(&so->so_rcv);
5929 			if (error) {
5930 				goto release;
5931 			}
5932 			control->held_length = 0;
5933 		}
5934 		if (hold_sblock) {
5935 			SOCKBUF_UNLOCK(&so->so_rcv);
5936 			hold_sblock = 0;
5937 		}
5938 		if (control->length == 0) {
5939 			/* still nothing here */
5940 			if (control->end_added == 1) {
5941 				/* the peer aborted, or is done, i.e. did a shutdown */
5942 				out_flags |= MSG_EOR;
5943 				if (control->pdapi_aborted) {
5944 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5945 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5946 
5947 					out_flags |= MSG_TRUNC;
5948 				} else {
5949 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5950 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5951 				}
5952 				goto done_with_control;
5953 			}
5954 			if (so->so_rcv.sb_cc > held_length) {
5955 				control->held_length = so->so_rcv.sb_cc;
5956 				held_length = 0;
5957 			}
5958 			goto wait_some_more;
5959 		} else if (control->data == NULL) {
5960 			/*
5961 			 * we must re-sync since data is probably being
5962 			 * added
5963 			 */
5964 			SCTP_INP_READ_LOCK(inp);
5965 			if ((control->length > 0) && (control->data == NULL)) {
5966 				/*
5967 				 * big trouble.. we have the lock and it's
5968 				 * corrupt?
5969 				 */
5970 #ifdef INVARIANTS
5971 				panic("Impossible data==NULL length !=0");
5972 #endif
5973 				out_flags |= MSG_EOR;
5974 				out_flags |= MSG_TRUNC;
5975 				control->length = 0;
5976 				SCTP_INP_READ_UNLOCK(inp);
5977 				goto done_with_control;
5978 			}
5979 			SCTP_INP_READ_UNLOCK(inp);
5980 			/* We will fall around to get more data */
5981 		}
5982 		goto get_more_data;
5983 	} else {
5984 		/*-
5985 		 * Give caller back the mbuf chain,
5986 		 * store in uio_resid the length
5987 		 */
5988 		wakeup_read_socket = 0;
5989 		if ((control->end_added == 0) ||
5990 		    (TAILQ_NEXT(control, next) == NULL)) {
5991 			/* Need to get rlock */
5992 			if (hold_rlock == 0) {
5993 				SCTP_INP_READ_LOCK(inp);
5994 				hold_rlock = 1;
5995 			}
5996 		}
5997 		if (control->end_added) {
5998 			out_flags |= MSG_EOR;
5999 			if ((control->do_not_ref_stcb == 0) &&
6000 			    (control->stcb != NULL) &&
6001 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6002 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6003 		}
6004 		if (control->spec_flags & M_NOTIFICATION) {
6005 			out_flags |= MSG_NOTIFICATION;
6006 		}
6007 		uio->uio_resid = control->length;
6008 		*mp = control->data;
6009 		m = control->data;
6010 		while (m) {
6011 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6012 				sctp_sblog(&so->so_rcv,
6013 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6014 			}
6015 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6016 			freed_so_far += SCTP_BUF_LEN(m);
6017 			freed_so_far += MSIZE;
6018 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6019 				sctp_sblog(&so->so_rcv,
6020 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6021 			}
6022 			m = SCTP_BUF_NEXT(m);
6023 		}
6024 		control->data = control->tail_mbuf = NULL;
6025 		control->length = 0;
6026 		if (out_flags & MSG_EOR) {
6027 			/* Done with this control */
6028 			goto done_with_control;
6029 		}
6030 	}
6031 release:
6032 	if (hold_rlock == 1) {
6033 		SCTP_INP_READ_UNLOCK(inp);
6034 		hold_rlock = 0;
6035 	}
6036 	if (hold_sblock == 1) {
6037 		SOCKBUF_UNLOCK(&so->so_rcv);
6038 		hold_sblock = 0;
6039 	}
6040 	sbunlock(&so->so_rcv);
6041 	sockbuf_lock = 0;
6042 
6043 release_unlocked:
6044 	if (hold_sblock) {
6045 		SOCKBUF_UNLOCK(&so->so_rcv);
6046 		hold_sblock = 0;
6047 	}
6048 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6049 		if ((freed_so_far >= rwnd_req) &&
6050 		    (control && (control->do_not_ref_stcb == 0)) &&
6051 		    (no_rcv_needed == 0))
6052 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6053 	}
6054 out:
6055 	if (msg_flags) {
6056 		*msg_flags = out_flags;
6057 	}
6058 	if (((out_flags & MSG_EOR) == 0) &&
6059 	    ((in_flags & MSG_PEEK) == 0) &&
6060 	    (sinfo) &&
6061 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6062 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6063 		struct sctp_extrcvinfo *s_extra;
6064 
6065 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6066 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6067 	}
6068 	if (hold_rlock == 1) {
6069 		SCTP_INP_READ_UNLOCK(inp);
6070 	}
6071 	if (hold_sblock) {
6072 		SOCKBUF_UNLOCK(&so->so_rcv);
6073 	}
6074 	if (sockbuf_lock) {
6075 		sbunlock(&so->so_rcv);
6076 	}
6077 	if (freecnt_applied) {
6078 		/*
6079 		 * The lock on the socket buffer protects us so the free
6080 		 * code will stop. But since we used the socketbuf lock and
6081 		 * the sender uses the tcb_lock to increment, we need to use
6082 		 * the atomic add to the refcnt.
6083 		 */
6084 		if (stcb == NULL) {
6085 #ifdef INVARIANTS
6086 			panic("stcb for refcnt has gone NULL?");
6087 			goto stage_left;
6088 #else
6089 			goto stage_left;
6090 #endif
6091 		}
6092 		atomic_add_int(&stcb->asoc.refcnt, -1);
6093 		/* Save the value back for next time */
6094 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6095 	}
6096 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6097 		if (stcb) {
6098 			sctp_misc_ints(SCTP_SORECV_DONE,
6099 			    freed_so_far,
6100 			    (uint32_t) ((uio) ? (slen - uio->uio_resid) : slen),
6101 			    stcb->asoc.my_rwnd,
6102 			    so->so_rcv.sb_cc);
6103 		} else {
6104 			sctp_misc_ints(SCTP_SORECV_DONE,
6105 			    freed_so_far,
6106 			    (uint32_t) ((uio) ? (slen - uio->uio_resid) : slen),
6107 			    0,
6108 			    so->so_rcv.sb_cc);
6109 		}
6110 	}
6111 stage_left:
6112 	if (wakeup_read_socket) {
6113 		sctp_sorwakeup(inp, so);
6114 	}
6115 	return (error);
6116 }
6117 
6118 
6119 #ifdef SCTP_MBUF_LOGGING
6120 struct mbuf *
6121 sctp_m_free(struct mbuf *m)
6122 {
6123 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6124 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6125 	}
6126 	return (m_free(m));
6127 }
6128 
6129 void
6130 sctp_m_freem(struct mbuf *mb)
6131 {
6132 	while (mb != NULL)
6133 		mb = sctp_m_free(mb);
6134 }
6135 
6136 #endif
6137 
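/*
 * sctp_dynamic_set_primary() does not walk the associations itself; it
 * looks up the sctp_ifa for the given local address, builds a work item
 * (struct sctp_laddr) carrying SCTP_SET_PRIM_ADDR, puts it on the global
 * address work queue and starts the ADDR_WQ timer so the iterator can
 * issue the peer-set-primary requests.
 */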
6138 int
6139 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6140 {
6141 	/*
6142 	 * Given a local address. For all associations that hold the
6143 	 * address, request a peer-set-primary.
6144 	 */
6145 	struct sctp_ifa *ifa;
6146 	struct sctp_laddr *wi;
6147 
6148 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6149 	if (ifa == NULL) {
6150 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6151 		return (EADDRNOTAVAIL);
6152 	}
6153 	/*
6154 	 * Now that we have the ifa we must awaken the iterator with this
6155 	 * message.
6156 	 */
6157 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6158 	if (wi == NULL) {
6159 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6160 		return (ENOMEM);
6161 	}
6162 	/* Now incr the count and init the wi structure */
6163 	SCTP_INCR_LADDR_COUNT();
6164 	bzero(wi, sizeof(*wi));
6165 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6166 	wi->ifa = ifa;
6167 	wi->action = SCTP_SET_PRIM_ADDR;
6168 	atomic_add_int(&ifa->refcount, 1);
6169 
6170 	/* Now add it to the work queue */
6171 	SCTP_WQ_ADDR_LOCK();
6172 	/*
6173 	 * Should this really be a tailq? As it is we will process the
6174 	 * newest first :-0
6175 	 */
6176 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6177 	SCTP_WQ_ADDR_UNLOCK();
6178 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6179 	    (struct sctp_inpcb *)NULL,
6180 	    (struct sctp_tcb *)NULL,
6181 	    (struct sctp_nets *)NULL);
6182 	return (0);
6183 }
6184 
6185 
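/*
 * sctp_soreceive() is the SCTP-specific soreceive hook: it wraps
 * sctp_sorecvmsg() and converts the returned sctp_sndrcvinfo into a
 * control mbuf for the caller.
 *
 * A minimal userland sketch of the path that ends up here (assumes fd
 * is an SCTP socket opened elsewhere; handle_notification() and
 * consume_data() are placeholders for application code):
 *
 *	struct sctp_sndrcvinfo sinfo;
 *	struct sockaddr_in from;
 *	socklen_t fromlen = sizeof(from);
 *	char buf[2048];
 *	int flags = 0;
 *	ssize_t n;
 *
 *	n = sctp_recvmsg(fd, buf, sizeof(buf),
 *	    (struct sockaddr *)&from, &fromlen, &sinfo, &flags);
 *	if (n > 0 && (flags & MSG_NOTIFICATION))
 *		handle_notification(buf, (size_t)n);
 *	else if (n > 0)
 *		consume_data(buf, (size_t)n, &sinfo);
 */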
6186 int
6187 sctp_soreceive(struct socket *so,
6188     struct sockaddr **psa,
6189     struct uio *uio,
6190     struct mbuf **mp0,
6191     struct mbuf **controlp,
6192     int *flagsp)
6193 {
6194 	int error, fromlen;
6195 	uint8_t sockbuf[256];
6196 	struct sockaddr *from;
6197 	struct sctp_extrcvinfo sinfo;
6198 	int filling_sinfo = 1;
6199 	struct sctp_inpcb *inp;
6200 
6201 	inp = (struct sctp_inpcb *)so->so_pcb;
6202 	/* pickup the assoc we are reading from */
6203 	if (inp == NULL) {
6204 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6205 		return (EINVAL);
6206 	}
6207 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6208 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6209 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6210 	    (controlp == NULL)) {
6211 		/* user does not want the sndrcv ctl */
6212 		filling_sinfo = 0;
6213 	}
6214 	if (psa) {
6215 		from = (struct sockaddr *)sockbuf;
6216 		fromlen = sizeof(sockbuf);
6217 		from->sa_len = 0;
6218 	} else {
6219 		from = NULL;
6220 		fromlen = 0;
6221 	}
6222 
6223 	if (filling_sinfo) {
6224 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6225 	}
6226 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6227 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6228 	if (controlp != NULL) {
6229 		/* copy back the sinfo in a CMSG format */
6230 		if (filling_sinfo)
6231 			*controlp = sctp_build_ctl_nchunk(inp,
6232 			    (struct sctp_sndrcvinfo *)&sinfo);
6233 		else
6234 			*controlp = NULL;
6235 	}
6236 	if (psa) {
6237 		/* copy back the address info */
6238 		if (from && from->sa_len) {
6239 			*psa = sodupsockaddr(from, M_NOWAIT);
6240 		} else {
6241 			*psa = NULL;
6242 		}
6243 	}
6244 	return (error);
6245 }
6246 
6247 
6248 
6249 
6250 
6251 int
6252 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6253     int totaddr, int *error)
6254 {
6255 	int added = 0;
6256 	int i;
6257 	struct sctp_inpcb *inp;
6258 	struct sockaddr *sa;
6259 	size_t incr = 0;
6260 
6261 #ifdef INET
6262 	struct sockaddr_in *sin;
6263 
6264 #endif
6265 #ifdef INET6
6266 	struct sockaddr_in6 *sin6;
6267 
6268 #endif
6269 
6270 	sa = addr;
6271 	inp = stcb->sctp_ep;
6272 	*error = 0;
6273 	for (i = 0; i < totaddr; i++) {
6274 		switch (sa->sa_family) {
6275 #ifdef INET
6276 		case AF_INET:
6277 			incr = sizeof(struct sockaddr_in);
6278 			sin = (struct sockaddr_in *)sa;
6279 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6280 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6281 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6282 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6283 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6284 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6285 				*error = EINVAL;
6286 				goto out_now;
6287 			}
6288 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6289 			    SCTP_DONOT_SETSCOPE,
6290 			    SCTP_ADDR_IS_CONFIRMED)) {
6291 				/* assoc gone no un-lock */
6292 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6293 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6294 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6295 				*error = ENOBUFS;
6296 				goto out_now;
6297 			}
6298 			added++;
6299 			break;
6300 #endif
6301 #ifdef INET6
6302 		case AF_INET6:
6303 			incr = sizeof(struct sockaddr_in6);
6304 			sin6 = (struct sockaddr_in6 *)sa;
6305 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6306 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6307 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6308 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6309 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6310 				*error = EINVAL;
6311 				goto out_now;
6312 			}
6313 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6314 			    SCTP_DONOT_SETSCOPE,
6315 			    SCTP_ADDR_IS_CONFIRMED)) {
6316 				/* assoc gone no un-lock */
6317 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6318 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6319 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6320 				*error = ENOBUFS;
6321 				goto out_now;
6322 			}
6323 			added++;
6324 			break;
6325 #endif
6326 		default:
6327 			break;
6328 		}
6329 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6330 	}
6331 out_now:
6332 	return (added);
6333 }
6334 
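/*
 * sctp_connectx_helper_find() walks the packed array of sockaddrs that
 * userland hands to sctp_connectx(3): each address immediately follows
 * the previous one, sized by its address family. It counts the v4 and
 * v6 entries, rejects v4-mapped v6 addresses and bad sa_len values,
 * and returns early if any address already belongs to an existing
 * association on this endpoint.
 */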
6335 struct sctp_tcb *
6336 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6337     unsigned int *totaddr,
6338     unsigned int *num_v4, unsigned int *num_v6, int *error,
6339     unsigned int limit, int *bad_addr)
6340 {
6341 	struct sockaddr *sa;
6342 	struct sctp_tcb *stcb = NULL;
6343 	unsigned int incr, at, i;
6344 
6345 	at = incr = 0;
6346 	sa = addr;
6347 	*error = *num_v6 = *num_v4 = 0;
6348 	/* account and validate addresses */
6349 	for (i = 0; i < *totaddr; i++) {
6350 		switch (sa->sa_family) {
6351 #ifdef INET
6352 		case AF_INET:
6353 			if (sa->sa_len != incr) {
6354 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6355 				*error = EINVAL;
6356 				*bad_addr = 1;
6357 				return (NULL);
6358 			}
6359 			(*num_v4) += 1;
6360 			incr = (unsigned int)sizeof(struct sockaddr_in);
6361 			break;
6362 #endif
6363 #ifdef INET6
6364 		case AF_INET6:
6365 			{
6366 				struct sockaddr_in6 *sin6;
6367 
6368 				sin6 = (struct sockaddr_in6 *)sa;
6369 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6370 					/* Must be non-mapped for connectx */
6371 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6372 					*error = EINVAL;
6373 					*bad_addr = 1;
6374 					return (NULL);
6375 				}
6376 				if (sa->sa_len != incr) {
6377 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6378 					*error = EINVAL;
6379 					*bad_addr = 1;
6380 					return (NULL);
6381 				}
6382 				(*num_v6) += 1;
6383 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6384 				break;
6385 			}
6386 #endif
6387 		default:
6388 			*totaddr = i;
6389 			/* we are done */
6390 			break;
6391 		}
6392 		if (i == *totaddr) {
6393 			break;
6394 		}
6395 		SCTP_INP_INCR_REF(inp);
6396 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6397 		if (stcb != NULL) {
6398 			/* Already have or am bringing up an association */
6399 			return (stcb);
6400 		} else {
6401 			SCTP_INP_DECR_REF(inp);
6402 		}
6403 		if ((at + incr) > limit) {
6404 			*totaddr = i;
6405 			break;
6406 		}
6407 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6408 	}
6409 	return ((struct sctp_tcb *)NULL);
6410 }
6411 
6412 /*
6413  * sctp_bindx(ADD) for one address.
6414  * assumes all arguments are valid/checked by caller.
6415  */
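/*
 * A minimal userland sketch of what drives this path (assumes fd is a
 * subset-bound SCTP socket opened elsewhere; error handling omitted).
 * Passing port 0 means "use the port the endpoint is already bound to",
 * as checked below:
 *
 *	struct sockaddr_in extra;
 *
 *	memset(&extra, 0, sizeof(extra));
 *	extra.sin_family = AF_INET;
 *	extra.sin_len = sizeof(extra);
 *	extra.sin_port = htons(0);
 *	inet_pton(AF_INET, "192.0.2.10", &extra.sin_addr);
 *	sctp_bindx(fd, (struct sockaddr *)&extra, 1, SCTP_BINDX_ADD_ADDR);
 */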
6416 void
6417 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6418     struct sockaddr *sa, sctp_assoc_t assoc_id,
6419     uint32_t vrf_id, int *error, void *p)
6420 {
6421 	struct sockaddr *addr_touse;
6422 
6423 #if defined(INET) && defined(INET6)
6424 	struct sockaddr_in sin;
6425 
6426 #endif
6427 
6428 	/* see if we're bound all already! */
6429 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6430 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6431 		*error = EINVAL;
6432 		return;
6433 	}
6434 	addr_touse = sa;
6435 #ifdef INET6
6436 	if (sa->sa_family == AF_INET6) {
6437 #ifdef INET
6438 		struct sockaddr_in6 *sin6;
6439 
6440 #endif
6441 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6442 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6443 			*error = EINVAL;
6444 			return;
6445 		}
6446 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6447 			/* can only bind v6 on PF_INET6 sockets */
6448 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6449 			*error = EINVAL;
6450 			return;
6451 		}
6452 #ifdef INET
6453 		sin6 = (struct sockaddr_in6 *)addr_touse;
6454 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6455 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6456 			    SCTP_IPV6_V6ONLY(inp)) {
6457 				/* can't bind v4-mapped on PF_INET sockets */
6458 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6459 				*error = EINVAL;
6460 				return;
6461 			}
6462 			in6_sin6_2_sin(&sin, sin6);
6463 			addr_touse = (struct sockaddr *)&sin;
6464 		}
6465 #endif
6466 	}
6467 #endif
6468 #ifdef INET
6469 	if (sa->sa_family == AF_INET) {
6470 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6471 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6472 			*error = EINVAL;
6473 			return;
6474 		}
6475 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6476 		    SCTP_IPV6_V6ONLY(inp)) {
6477 			/* can't bind v4 on PF_INET sockets */
6478 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6479 			*error = EINVAL;
6480 			return;
6481 		}
6482 	}
6483 #endif
6484 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6485 		if (p == NULL) {
6486 			/* Can't get proc for Net/Open BSD */
6487 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6488 			*error = EINVAL;
6489 			return;
6490 		}
6491 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6492 		return;
6493 	}
6494 	/*
6495 	 * No locks required here since bind and mgmt_ep_sa all do their own
6496 	 * locking. If we do something for the FIX: below we may need to
6497 	 * lock in that case.
6498 	 */
6499 	if (assoc_id == 0) {
6500 		/* add the address */
6501 		struct sctp_inpcb *lep;
6502 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6503 
6504 		/* validate the incoming port */
6505 		if ((lsin->sin_port != 0) &&
6506 		    (lsin->sin_port != inp->sctp_lport)) {
6507 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6508 			*error = EINVAL;
6509 			return;
6510 		} else {
6511 			/* user specified 0 port, set it to existing port */
6512 			lsin->sin_port = inp->sctp_lport;
6513 		}
6514 
6515 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6516 		if (lep != NULL) {
6517 			/*
6518 			 * We must decrement the refcount since we have the
6519 			 * ep already and are binding. No remove going on
6520 			 * here.
6521 			 */
6522 			SCTP_INP_DECR_REF(lep);
6523 		}
6524 		if (lep == inp) {
6525 			/* already bound to it.. ok */
6526 			return;
6527 		} else if (lep == NULL) {
6528 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6529 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6530 			    SCTP_ADD_IP_ADDRESS,
6531 			    vrf_id, NULL);
6532 		} else {
6533 			*error = EADDRINUSE;
6534 		}
6535 		if (*error)
6536 			return;
6537 	} else {
6538 		/*
6539 		 * FIX: decide whether we allow assoc based bindx
6540 		 */
6541 	}
6542 }
6543 
6544 /*
6545  * sctp_bindx(DELETE) for one address.
6546  * assumes all arguments are valid/checked by caller.
6547  */
6548 void
6549 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6550     struct sockaddr *sa, sctp_assoc_t assoc_id,
6551     uint32_t vrf_id, int *error)
6552 {
6553 	struct sockaddr *addr_touse;
6554 
6555 #if defined(INET) && defined(INET6)
6556 	struct sockaddr_in sin;
6557 
6558 #endif
6559 
6560 	/* see if we're bound all already! */
6561 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6562 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6563 		*error = EINVAL;
6564 		return;
6565 	}
6566 	addr_touse = sa;
6567 #ifdef INET6
6568 	if (sa->sa_family == AF_INET6) {
6569 #ifdef INET
6570 		struct sockaddr_in6 *sin6;
6571 
6572 #endif
6573 
6574 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6575 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6576 			*error = EINVAL;
6577 			return;
6578 		}
6579 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6580 			/* can only bind v6 on PF_INET6 sockets */
6581 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6582 			*error = EINVAL;
6583 			return;
6584 		}
6585 #ifdef INET
6586 		sin6 = (struct sockaddr_in6 *)addr_touse;
6587 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6588 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6589 			    SCTP_IPV6_V6ONLY(inp)) {
6590 				/* can't bind mapped-v4 on PF_INET sockets */
6591 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6592 				*error = EINVAL;
6593 				return;
6594 			}
6595 			in6_sin6_2_sin(&sin, sin6);
6596 			addr_touse = (struct sockaddr *)&sin;
6597 		}
6598 #endif
6599 	}
6600 #endif
6601 #ifdef INET
6602 	if (sa->sa_family == AF_INET) {
6603 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6604 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6605 			*error = EINVAL;
6606 			return;
6607 		}
6608 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6609 		    SCTP_IPV6_V6ONLY(inp)) {
6610 			/* can't bind v4 on PF_INET sockets */
6611 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6612 			*error = EINVAL;
6613 			return;
6614 		}
6615 	}
6616 #endif
6617 	/*
6618 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6619 	 * below is ever changed we may need to lock before calling
6620 	 * association level binding.
6621 	 */
6622 	if (assoc_id == 0) {
6623 		/* delete the address */
6624 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6625 		    SCTP_DEL_IP_ADDRESS,
6626 		    vrf_id, NULL);
6627 	} else {
6628 		/*
6629 		 * FIX: decide whether we allow assoc based bindx
6630 		 */
6631 	}
6632 }
6633 
6634 /*
6635  * returns the valid local address count for an assoc, taking into account
6636  * all scoping rules
6637  */
6638 int
6639 sctp_local_addr_count(struct sctp_tcb *stcb)
6640 {
6641 	int loopback_scope;
6642 
6643 #if defined(INET)
6644 	int ipv4_local_scope, ipv4_addr_legal;
6645 
6646 #endif
6647 #if defined (INET6)
6648 	int local_scope, site_scope, ipv6_addr_legal;
6649 
6650 #endif
6651 	struct sctp_vrf *vrf;
6652 	struct sctp_ifn *sctp_ifn;
6653 	struct sctp_ifa *sctp_ifa;
6654 	int count = 0;
6655 
6656 	/* Turn on all the appropriate scopes */
6657 	loopback_scope = stcb->asoc.scope.loopback_scope;
6658 #if defined(INET)
6659 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6660 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6661 #endif
6662 #if defined(INET6)
6663 	local_scope = stcb->asoc.scope.local_scope;
6664 	site_scope = stcb->asoc.scope.site_scope;
6665 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6666 #endif
6667 	SCTP_IPI_ADDR_RLOCK();
6668 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6669 	if (vrf == NULL) {
6670 		/* no vrf, no addresses */
6671 		SCTP_IPI_ADDR_RUNLOCK();
6672 		return (0);
6673 	}
6674 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6675 		/*
6676 		 * bound all case: go through all ifns on the vrf
6677 		 */
6678 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6679 			if ((loopback_scope == 0) &&
6680 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6681 				continue;
6682 			}
6683 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6684 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6685 					continue;
6686 				switch (sctp_ifa->address.sa.sa_family) {
6687 #ifdef INET
6688 				case AF_INET:
6689 					if (ipv4_addr_legal) {
6690 						struct sockaddr_in *sin;
6691 
6692 						sin = &sctp_ifa->address.sin;
6693 						if (sin->sin_addr.s_addr == 0) {
6694 							/*
6695 							 * skip unspecified
6696 							 * addrs
6697 							 */
6698 							continue;
6699 						}
6700 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6701 						    &sin->sin_addr) != 0) {
6702 							continue;
6703 						}
6704 						if ((ipv4_local_scope == 0) &&
6705 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6706 							continue;
6707 						}
6708 						/* count this one */
6709 						count++;
6710 					} else {
6711 						continue;
6712 					}
6713 					break;
6714 #endif
6715 #ifdef INET6
6716 				case AF_INET6:
6717 					if (ipv6_addr_legal) {
6718 						struct sockaddr_in6 *sin6;
6719 
6720 						sin6 = &sctp_ifa->address.sin6;
6721 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6722 							continue;
6723 						}
6724 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6725 						    &sin6->sin6_addr) != 0) {
6726 							continue;
6727 						}
6728 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6729 							if (local_scope == 0)
6730 								continue;
6731 							if (sin6->sin6_scope_id == 0) {
6732 								if (sa6_recoverscope(sin6) != 0)
6733 									/* bad link local address */
6747 									continue;
6748 							}
6749 						}
6750 						if ((site_scope == 0) &&
6751 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6752 							continue;
6753 						}
6754 						/* count this one */
6755 						count++;
6756 					}
6757 					break;
6758 #endif
6759 				default:
6760 					/* TSNH */
6761 					break;
6762 				}
6763 			}
6764 		}
6765 	} else {
6766 		/*
6767 		 * subset bound case
6768 		 */
6769 		struct sctp_laddr *laddr;
6770 
6771 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6772 		    sctp_nxt_addr) {
6773 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6774 				continue;
6775 			}
6776 			/* count this one */
6777 			count++;
6778 		}
6779 	}
6780 	SCTP_IPI_ADDR_RUNLOCK();
6781 	return (count);
6782 }
6783 
6784 #if defined(SCTP_LOCAL_TRACE_BUF)
6785 
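/*
 * Reserve a slot in the circular trace buffer without taking a lock:
 * read the current index, compute the successor (wrapping at
 * SCTP_MAX_LOGGING_SIZE) and retry the atomic_cmpset_int() until no
 * other CPU raced us; the reserved entry is then filled in outside
 * the loop.
 */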
6786 void
6787 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6788 {
6789 	uint32_t saveindex, newindex;
6790 
6791 	do {
6792 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6793 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6794 			newindex = 1;
6795 		} else {
6796 			newindex = saveindex + 1;
6797 		}
6798 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6799 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6800 		saveindex = 0;
6801 	}
6802 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6803 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6804 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6805 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6806 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6807 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6808 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6809 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6810 }
6811 
6812 #endif
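/*
 * sctp_recv_udp_tunneled_packet() is the receive callback for the UDP
 * socket used for SCTP-over-UDP encapsulation (RFC 6951). It records
 * the peer's UDP source port, splits off and discards the UDP header,
 * glues the payload back onto the IP header, fixes up the IP/IPv6
 * length fields and feeds the result into the normal SCTP input path
 * with the encapsulation port attached.
 */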
6813 static void
6814 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6815     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6816 {
6817 	struct ip *iph;
6818 
6819 #ifdef INET6
6820 	struct ip6_hdr *ip6;
6821 
6822 #endif
6823 	struct mbuf *sp, *last;
6824 	struct udphdr *uhdr;
6825 	uint16_t port;
6826 
6827 	if ((m->m_flags & M_PKTHDR) == 0) {
6828 		/* Can't handle one that is not a pkt hdr */
6829 		goto out;
6830 	}
6831 	/* Pull the src port */
6832 	iph = mtod(m, struct ip *);
6833 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6834 	port = uhdr->uh_sport;
6835 	/*
6836 	 * Split out the mbuf chain. Leave the IP header in m, place the
6837 	 * rest in the sp.
6838 	 */
6839 	sp = m_split(m, off, M_NOWAIT);
6840 	if (sp == NULL) {
6841 		/* Gak, drop packet, we can't do a split */
6842 		goto out;
6843 	}
6844 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6845 		/* Gak, packet can't have an SCTP header in it - too small */
6846 		m_freem(sp);
6847 		goto out;
6848 	}
6849 	/* Now pull up the UDP header and SCTP header together */
6850 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6851 	if (sp == NULL) {
6852 		/* Gak pullup failed */
6853 		goto out;
6854 	}
6855 	/* Trim out the UDP header */
6856 	m_adj(sp, sizeof(struct udphdr));
6857 
6858 	/* Now reconstruct the mbuf chain */
6859 	for (last = m; last->m_next; last = last->m_next);
6860 	last->m_next = sp;
6861 	m->m_pkthdr.len += sp->m_pkthdr.len;
6862 	/*
6863 	 * The CSUM_DATA_VALID flag indicates that the HW checked the UDP
6864 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
6865 	 * CSUM_SCTP_VALID this would imply that the HW also verified the
6866 	 * SCTP checksum. Therefore, clear the bit.
6867 	 */
6868 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6869 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6870 	    m->m_pkthdr.len,
6871 	    if_name(m->m_pkthdr.rcvif),
6872 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6873 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
6874 	iph = mtod(m, struct ip *);
6875 	switch (iph->ip_v) {
6876 #ifdef INET
6877 	case IPVERSION:
6878 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6879 		sctp_input_with_port(m, off, port);
6880 		break;
6881 #endif
6882 #ifdef INET6
6883 	case IPV6_VERSION >> 4:
6884 		ip6 = mtod(m, struct ip6_hdr *);
6885 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6886 		sctp6_input_with_port(&m, &off, port);
6887 		break;
6888 #endif
6889 	default:
6890 		goto out;
6891 		break;
6892 	}
6893 	return;
6894 out:
6895 	m_freem(m);
6896 }
6897 
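/*
 * The handlers below are the control-input callbacks for the tunneling
 * UDP socket. They recover the embedded SCTP common header from the
 * ICMP/ICMPv6 payload, locate the matching association, verify the UDP
 * ports and the verification tag (or the initiate tag of an embedded
 * INIT), and only then report the error; a port unreachable is mapped
 * to protocol unreachable, since the far end evidently does not accept
 * UDP-encapsulated SCTP on that port.
 */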
6898 #ifdef INET
6899 static void
6900 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
6901 {
6902 	struct ip *outer_ip, *inner_ip;
6903 	struct sctphdr *sh;
6904 	struct icmp *icmp;
6905 	struct udphdr *udp;
6906 	struct sctp_inpcb *inp;
6907 	struct sctp_tcb *stcb;
6908 	struct sctp_nets *net;
6909 	struct sctp_init_chunk *ch;
6910 	struct sockaddr_in src, dst;
6911 	uint8_t type, code;
6912 
6913 	inner_ip = (struct ip *)vip;
6914 	icmp = (struct icmp *)((caddr_t)inner_ip -
6915 	    (sizeof(struct icmp) - sizeof(struct ip)));
6916 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
6917 	if (ntohs(outer_ip->ip_len) <
6918 	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
6919 		return;
6920 	}
6921 	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
6922 	sh = (struct sctphdr *)(udp + 1);
6923 	memset(&src, 0, sizeof(struct sockaddr_in));
6924 	src.sin_family = AF_INET;
6925 	src.sin_len = sizeof(struct sockaddr_in);
6926 	src.sin_port = sh->src_port;
6927 	src.sin_addr = inner_ip->ip_src;
6928 	memset(&dst, 0, sizeof(struct sockaddr_in));
6929 	dst.sin_family = AF_INET;
6930 	dst.sin_len = sizeof(struct sockaddr_in);
6931 	dst.sin_port = sh->dest_port;
6932 	dst.sin_addr = inner_ip->ip_dst;
6933 	/*
6934 	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
6935 	 * holds our local endpoint address. Thus we reverse the dst and the
6936 	 * src in the lookup.
6937 	 */
6938 	inp = NULL;
6939 	net = NULL;
6940 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
6941 	    (struct sockaddr *)&src,
6942 	    &inp, &net, 1,
6943 	    SCTP_DEFAULT_VRFID);
6944 	if ((stcb != NULL) &&
6945 	    (net != NULL) &&
6946 	    (inp != NULL) &&
6947 	    (inp->sctp_socket != NULL)) {
6948 		/* Check the UDP port numbers */
6949 		if ((udp->uh_dport != net->port) ||
6950 		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
6951 			SCTP_TCB_UNLOCK(stcb);
6952 			return;
6953 		}
6954 		/* Check the verification tag */
6955 		if (ntohl(sh->v_tag) != 0) {
6956 			/*
6957 			 * This must be the verification tag used for
6958 			 * sending out packets. We don't consider packets
6959 			 * reflecting the verification tag.
6960 			 */
6961 			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
6962 				SCTP_TCB_UNLOCK(stcb);
6963 				return;
6964 			}
6965 		} else {
6966 			if (ntohs(outer_ip->ip_len) >=
6967 			    sizeof(struct ip) +
6968 			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
6969 				/*
6970 				 * In this case we can check if we got an
6971 				 * INIT chunk and if the initiate tag
6972 				 * matches.
6973 				 */
6974 				ch = (struct sctp_init_chunk *)(sh + 1);
6975 				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
6976 				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
6977 					SCTP_TCB_UNLOCK(stcb);
6978 					return;
6979 				}
6980 			} else {
6981 				SCTP_TCB_UNLOCK(stcb);
6982 				return;
6983 			}
6984 		}
6985 		type = icmp->icmp_type;
6986 		code = icmp->icmp_code;
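		/*
		 * A port unreachable means the peer rejected the UDP
		 * encapsulation; report it as a protocol unreachable, which
		 * is presumably what an unencapsulated SCTP packet would
		 * have triggered.
		 */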
6987 		if ((type == ICMP_UNREACH) &&
6988 		    (code == ICMP_UNREACH_PORT)) {
6989 			code = ICMP_UNREACH_PROTOCOL;
6990 		}
6991 		sctp_notify(inp, stcb, net, type, code,
6992 		    ntohs(inner_ip->ip_len),
6993 		    ntohs(icmp->icmp_nextmtu));
6994 	} else {
6995 		if ((stcb == NULL) && (inp != NULL)) {
6996 			/* reduce ref-count */
6997 			SCTP_INP_WLOCK(inp);
6998 			SCTP_INP_DECR_REF(inp);
6999 			SCTP_INP_WUNLOCK(inp);
7000 		}
7001 		if (stcb) {
7002 			SCTP_TCB_UNLOCK(stcb);
7003 		}
7004 	}
7005 	return;
7006 }
7007 
7008 #endif
7009 
7010 #ifdef INET6
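/*
 * ICMPv6 error handler for the IPv6 UDP tunneling socket, registered via
 * udp_set_kernel_tunneling().  Copy the UDP header and the start of the
 * SCTP common header out of the mbuf chain, locate the association,
 * validate the ports and the verification tag, and pass the error on to
 * sctp6_notify().
 */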
7011 static void
7012 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
7013 {
7014 	struct ip6ctlparam *ip6cp;
7015 	struct sctp_inpcb *inp;
7016 	struct sctp_tcb *stcb;
7017 	struct sctp_nets *net;
7018 	struct sctphdr sh;
7019 	struct udphdr udp;
7020 	struct sockaddr_in6 src, dst;
7021 	uint8_t type, code;
7022 
7023 	ip6cp = (struct ip6ctlparam *)d;
7024 	/*
7025 	 * XXX: We assume that when ip6c_ip6 is non-NULL, ip6c_m and ip6c_off are valid.
7026 	 */
7027 	if (ip6cp->ip6c_m == NULL) {
7028 		return;
7029 	}
7030 	/*
7031 	 * Check if we can safely examine the ports and the verification tag
7032 	 * of the SCTP common header.
7033 	 */
7034 	if (ip6cp->ip6c_m->m_pkthdr.len <
7035 	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
7036 		return;
7037 	}
7038 	/* Copy out the UDP header. */
7039 	memset(&udp, 0, sizeof(struct udphdr));
7040 	m_copydata(ip6cp->ip6c_m,
7041 	    ip6cp->ip6c_off,
7042 	    sizeof(struct udphdr),
7043 	    (caddr_t)&udp);
7044 	/* Copy out the port numbers and the verification tag. */
7045 	memset(&sh, 0, sizeof(struct sctphdr));
7046 	m_copydata(ip6cp->ip6c_m,
7047 	    ip6cp->ip6c_off + sizeof(struct udphdr),
7048 	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
7049 	    (caddr_t)&sh);
7050 	memset(&src, 0, sizeof(struct sockaddr_in6));
7051 	src.sin6_family = AF_INET6;
7052 	src.sin6_len = sizeof(struct sockaddr_in6);
7053 	src.sin6_port = sh.src_port;
7054 	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
7055 	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7056 		return;
7057 	}
7058 	memset(&dst, 0, sizeof(struct sockaddr_in6));
7059 	dst.sin6_family = AF_INET6;
7060 	dst.sin6_len = sizeof(struct sockaddr_in6);
7061 	dst.sin6_port = sh.dest_port;
7062 	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7063 	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7064 		return;
7065 	}
7066 	inp = NULL;
7067 	net = NULL;
7068 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7069 	    (struct sockaddr *)&src,
7070 	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7071 	if ((stcb != NULL) &&
7072 	    (net != NULL) &&
7073 	    (inp != NULL) &&
7074 	    (inp->sctp_socket != NULL)) {
7075 		/* Check the UDP port numbers */
7076 		if ((udp.uh_dport != net->port) ||
7077 		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7078 			SCTP_TCB_UNLOCK(stcb);
7079 			return;
7080 		}
7081 		/* Check the verification tag */
7082 		if (ntohl(sh.v_tag) != 0) {
7083 			/*
7084 			 * This must be the verification tag used for
7085 			 * sending out packets. We don't consider packets
7086 			 * reflecting the verification tag.
7087 			 */
7088 			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7089 				SCTP_TCB_UNLOCK(stcb);
7090 				return;
7091 			}
7092 		} else {
7093 			if (ip6cp->ip6c_m->m_pkthdr.len >=
7094 			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7095 			    sizeof(struct sctphdr) +
7096 			    sizeof(struct sctp_chunkhdr) +
7097 			    offsetof(struct sctp_init, a_rwnd)) {
7098 				/*
7099 				 * In this case we can check if we got an
7100 				 * INIT chunk and if the initiate tag
7101 				 * matches.
7102 				 */
7103 				uint32_t initiate_tag;
7104 				uint8_t chunk_type;
7105 
7106 				m_copydata(ip6cp->ip6c_m,
7107 				    ip6cp->ip6c_off +
7108 				    sizeof(struct udphdr) +
7109 				    sizeof(struct sctphdr),
7110 				    sizeof(uint8_t),
7111 				    (caddr_t)&chunk_type);
7112 				m_copydata(ip6cp->ip6c_m,
7113 				    ip6cp->ip6c_off +
7114 				    sizeof(struct udphdr) +
7115 				    sizeof(struct sctphdr) +
7116 				    sizeof(struct sctp_chunkhdr),
7117 				    sizeof(uint32_t),
7118 				    (caddr_t)&initiate_tag);
7119 				if ((chunk_type != SCTP_INITIATION) ||
7120 				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7121 					SCTP_TCB_UNLOCK(stcb);
7122 					return;
7123 				}
7124 			} else {
7125 				SCTP_TCB_UNLOCK(stcb);
7126 				return;
7127 			}
7128 		}
7129 		type = ip6cp->ip6c_icmp6->icmp6_type;
7130 		code = ip6cp->ip6c_icmp6->icmp6_code;
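		/*
		 * A port unreachable means the peer rejected the UDP
		 * encapsulation; report it as an unrecognized next header,
		 * which is presumably what an unencapsulated SCTP packet
		 * would have triggered.
		 */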
7131 		if ((type == ICMP6_DST_UNREACH) &&
7132 		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7133 			type = ICMP6_PARAM_PROB;
7134 			code = ICMP6_PARAMPROB_NEXTHEADER;
7135 		}
7136 		sctp6_notify(inp, stcb, net, type, code,
7137 		    (uint16_t) ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7138 	} else {
7139 		if ((stcb == NULL) && (inp != NULL)) {
7140 			/* reduce inp's ref-count */
7141 			SCTP_INP_WLOCK(inp);
7142 			SCTP_INP_DECR_REF(inp);
7143 			SCTP_INP_WUNLOCK(inp);
7144 		}
7145 		if (stcb) {
7146 			SCTP_TCB_UNLOCK(stcb);
7147 		}
7148 	}
7149 }
7150 
7151 #endif
7152 
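/*
 * Tear down SCTP over UDP support: close the IPv4 and/or IPv6 tunneling
 * sockets, if they exist.
 */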
7153 void
7154 sctp_over_udp_stop(void)
7155 {
7156 	/*
7157 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
7158 	 * for writing!
7159 	 */
7160 #ifdef INET
7161 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7162 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7163 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7164 	}
7165 #endif
7166 #ifdef INET6
7167 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7168 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7169 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7170 	}
7171 #endif
7172 }
7173 
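/*
 * Bring up SCTP over UDP support: create a UDP socket per supported
 * address family, install the tunneling and ICMP callbacks, and bind
 * each socket to the port configured by the sctp_udp_tunneling_port
 * sysctl variable.
 */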
7174 int
7175 sctp_over_udp_start(void)
7176 {
7177 	uint16_t port;
7178 	int ret;
7179 
7180 #ifdef INET
7181 	struct sockaddr_in sin;
7182 
7183 #endif
7184 #ifdef INET6
7185 	struct sockaddr_in6 sin6;
7186 
7187 #endif
7188 	/*
7189 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
7190 	 * for writing!
7191 	 */
7192 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7193 	if (ntohs(port) == 0) {
7194 		/* Must have a port set */
7195 		return (EINVAL);
7196 	}
7197 #ifdef INET
7198 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7199 		/* Already running -- must stop first */
7200 		return (EALREADY);
7201 	}
7202 #endif
7203 #ifdef INET6
7204 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7205 		/* Already running -- must stop first */
7206 		return (EALREADY);
7207 	}
7208 #endif
7209 #ifdef INET
7210 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7211 	    SOCK_DGRAM, IPPROTO_UDP,
7212 	    curthread->td_ucred, curthread))) {
7213 		sctp_over_udp_stop();
7214 		return (ret);
7215 	}
7216 	/* Call the special UDP hook. */
7217 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7218 	    sctp_recv_udp_tunneled_packet,
7219 	    sctp_recv_icmp_tunneled_packet,
7220 	    NULL))) {
7221 		sctp_over_udp_stop();
7222 		return (ret);
7223 	}
7224 	/* Ok, we have a socket, bind it to the port. */
7225 	memset(&sin, 0, sizeof(struct sockaddr_in));
7226 	sin.sin_len = sizeof(struct sockaddr_in);
7227 	sin.sin_family = AF_INET;
7228 	sin.sin_port = htons(port);
7229 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7230 	    (struct sockaddr *)&sin, curthread))) {
7231 		sctp_over_udp_stop();
7232 		return (ret);
7233 	}
7234 #endif
7235 #ifdef INET6
7236 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7237 	    SOCK_DGRAM, IPPROTO_UDP,
7238 	    curthread->td_ucred, curthread))) {
7239 		sctp_over_udp_stop();
7240 		return (ret);
7241 	}
7242 	/* Call the special UDP hook. */
7243 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7244 	    sctp_recv_udp_tunneled_packet,
7245 	    sctp_recv_icmp6_tunneled_packet,
7246 	    NULL))) {
7247 		sctp_over_udp_stop();
7248 		return (ret);
7249 	}
7250 	/* Ok, we have a socket, bind it to the port. */
7251 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7252 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7253 	sin6.sin6_family = AF_INET6;
7254 	sin6.sin6_port = htons(port);
7255 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7256 	    (struct sockaddr *)&sin6, curthread))) {
7257 		sctp_over_udp_stop();
7258 		return (ret);
7259 	}
7260 #endif
7261 	return (0);
7262 }
7263