xref: /freebsd/sys/netinet/sctputil.c (revision d3de06238379fc0e692927ebf74fcc41860c726f)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>	/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55 #ifdef INET6
56 #include <netinet/icmp6.h>
57 #endif
58 
59 
60 #ifndef KTR_SCTP
61 #define KTR_SCTP KTR_SUBSYS
62 #endif
63 
64 extern const struct sctp_cc_functions sctp_cc_functions[];
65 extern const struct sctp_ss_functions sctp_ss_functions[];
66 
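/*
 * The logging helpers below snapshot the relevant state into a local
 * struct sctp_cwnd_log and emit its four 32-bit misc words through
 * SCTP_CTR6() under KTR_SCTP.
 */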
67 void
68 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
69 {
70 	struct sctp_cwnd_log sctp_clog;
71 
72 	sctp_clog.x.sb.stcb = stcb;
73 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
74 	if (stcb)
75 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
76 	else
77 		sctp_clog.x.sb.stcb_sbcc = 0;
78 	sctp_clog.x.sb.incr = incr;
79 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
80 	    SCTP_LOG_EVENT_SB,
81 	    from,
82 	    sctp_clog.x.misc.log1,
83 	    sctp_clog.x.misc.log2,
84 	    sctp_clog.x.misc.log3,
85 	    sctp_clog.x.misc.log4);
86 }
87 
88 void
89 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
90 {
91 	struct sctp_cwnd_log sctp_clog;
92 
93 	sctp_clog.x.close.inp = (void *)inp;
94 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
95 	if (stcb) {
96 		sctp_clog.x.close.stcb = (void *)stcb;
97 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
98 	} else {
99 		sctp_clog.x.close.stcb = 0;
100 		sctp_clog.x.close.state = 0;
101 	}
102 	sctp_clog.x.close.loc = loc;
103 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
104 	    SCTP_LOG_EVENT_CLOSE,
105 	    0,
106 	    sctp_clog.x.misc.log1,
107 	    sctp_clog.x.misc.log2,
108 	    sctp_clog.x.misc.log3,
109 	    sctp_clog.x.misc.log4);
110 }
111 
112 void
113 rto_logging(struct sctp_nets *net, int from)
114 {
115 	struct sctp_cwnd_log sctp_clog;
116 
117 	memset(&sctp_clog, 0, sizeof(sctp_clog));
118 	sctp_clog.x.rto.net = (void *)net;
119 	sctp_clog.x.rto.rtt = net->rtt / 1000;
120 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
121 	    SCTP_LOG_EVENT_RTT,
122 	    from,
123 	    sctp_clog.x.misc.log1,
124 	    sctp_clog.x.misc.log2,
125 	    sctp_clog.x.misc.log3,
126 	    sctp_clog.x.misc.log4);
127 }
128 
129 void
130 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
131 {
132 	struct sctp_cwnd_log sctp_clog;
133 
134 	sctp_clog.x.strlog.stcb = stcb;
135 	sctp_clog.x.strlog.n_tsn = tsn;
136 	sctp_clog.x.strlog.n_sseq = sseq;
137 	sctp_clog.x.strlog.e_tsn = 0;
138 	sctp_clog.x.strlog.e_sseq = 0;
139 	sctp_clog.x.strlog.strm = stream;
140 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
141 	    SCTP_LOG_EVENT_STRM,
142 	    from,
143 	    sctp_clog.x.misc.log1,
144 	    sctp_clog.x.misc.log2,
145 	    sctp_clog.x.misc.log3,
146 	    sctp_clog.x.misc.log4);
147 }
148 
149 void
150 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
151 {
152 	struct sctp_cwnd_log sctp_clog;
153 
154 	sctp_clog.x.nagle.stcb = (void *)stcb;
155 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
156 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
157 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
158 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
159 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
160 	    SCTP_LOG_EVENT_NAGLE,
161 	    action,
162 	    sctp_clog.x.misc.log1,
163 	    sctp_clog.x.misc.log2,
164 	    sctp_clog.x.misc.log3,
165 	    sctp_clog.x.misc.log4);
166 }
167 
168 void
169 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
170 {
171 	struct sctp_cwnd_log sctp_clog;
172 
173 	sctp_clog.x.sack.cumack = cumack;
174 	sctp_clog.x.sack.oldcumack = old_cumack;
175 	sctp_clog.x.sack.tsn = tsn;
176 	sctp_clog.x.sack.numGaps = gaps;
177 	sctp_clog.x.sack.numDups = dups;
178 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
179 	    SCTP_LOG_EVENT_SACK,
180 	    from,
181 	    sctp_clog.x.misc.log1,
182 	    sctp_clog.x.misc.log2,
183 	    sctp_clog.x.misc.log3,
184 	    sctp_clog.x.misc.log4);
185 }
186 
187 void
188 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
189 {
190 	struct sctp_cwnd_log sctp_clog;
191 
192 	memset(&sctp_clog, 0, sizeof(sctp_clog));
193 	sctp_clog.x.map.base = map;
194 	sctp_clog.x.map.cum = cum;
195 	sctp_clog.x.map.high = high;
196 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
197 	    SCTP_LOG_EVENT_MAP,
198 	    from,
199 	    sctp_clog.x.misc.log1,
200 	    sctp_clog.x.misc.log2,
201 	    sctp_clog.x.misc.log3,
202 	    sctp_clog.x.misc.log4);
203 }
204 
205 void
206 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
207 {
208 	struct sctp_cwnd_log sctp_clog;
209 
210 	memset(&sctp_clog, 0, sizeof(sctp_clog));
211 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
212 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
213 	sctp_clog.x.fr.tsn = tsn;
214 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
215 	    SCTP_LOG_EVENT_FR,
216 	    from,
217 	    sctp_clog.x.misc.log1,
218 	    sctp_clog.x.misc.log2,
219 	    sctp_clog.x.misc.log3,
220 	    sctp_clog.x.misc.log4);
221 }
222 
223 #ifdef SCTP_MBUF_LOGGING
224 void
225 sctp_log_mb(struct mbuf *m, int from)
226 {
227 	struct sctp_cwnd_log sctp_clog;
228 
229 	sctp_clog.x.mb.mp = m;
230 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
231 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
232 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
233 	if (SCTP_BUF_IS_EXTENDED(m)) {
234 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
235 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
236 	} else {
237 		sctp_clog.x.mb.ext = 0;
238 		sctp_clog.x.mb.refcnt = 0;
239 	}
240 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
241 	    SCTP_LOG_EVENT_MBUF,
242 	    from,
243 	    sctp_clog.x.misc.log1,
244 	    sctp_clog.x.misc.log2,
245 	    sctp_clog.x.misc.log3,
246 	    sctp_clog.x.misc.log4);
247 }
248 
249 void
250 sctp_log_mbc(struct mbuf *m, int from)
251 {
252 	struct mbuf *mat;
253 
254 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
255 		sctp_log_mb(mat, from);
256 	}
257 }
258 
259 #endif
260 
261 void
262 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
263 {
264 	struct sctp_cwnd_log sctp_clog;
265 
266 	if (control == NULL) {
267 		SCTP_PRINTF("Gak log of NULL?\n");
268 		return;
269 	}
270 	sctp_clog.x.strlog.stcb = control->stcb;
271 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
272 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
273 	sctp_clog.x.strlog.strm = control->sinfo_stream;
274 	if (poschk != NULL) {
275 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
276 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
277 	} else {
278 		sctp_clog.x.strlog.e_tsn = 0;
279 		sctp_clog.x.strlog.e_sseq = 0;
280 	}
281 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
282 	    SCTP_LOG_EVENT_STRM,
283 	    from,
284 	    sctp_clog.x.misc.log1,
285 	    sctp_clog.x.misc.log2,
286 	    sctp_clog.x.misc.log3,
287 	    sctp_clog.x.misc.log4);
288 }
289 
290 void
291 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
292 {
293 	struct sctp_cwnd_log sctp_clog;
294 
295 	sctp_clog.x.cwnd.net = net;
296 	if (stcb->asoc.send_queue_cnt > 255)
297 		sctp_clog.x.cwnd.cnt_in_send = 255;
298 	else
299 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
300 	if (stcb->asoc.stream_queue_cnt > 255)
301 		sctp_clog.x.cwnd.cnt_in_str = 255;
302 	else
303 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
304 
305 	if (net) {
306 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
307 		sctp_clog.x.cwnd.inflight = net->flight_size;
308 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
309 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
310 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
311 	}
312 	if (SCTP_CWNDLOG_PRESEND == from) {
313 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
314 	}
315 	sctp_clog.x.cwnd.cwnd_augment = augment;
316 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
317 	    SCTP_LOG_EVENT_CWND,
318 	    from,
319 	    sctp_clog.x.misc.log1,
320 	    sctp_clog.x.misc.log2,
321 	    sctp_clog.x.misc.log3,
322 	    sctp_clog.x.misc.log4);
323 }
324 
325 void
326 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
327 {
328 	struct sctp_cwnd_log sctp_clog;
329 
330 	memset(&sctp_clog, 0, sizeof(sctp_clog));
331 	if (inp) {
332 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
333 
334 	} else {
335 		sctp_clog.x.lock.sock = (void *)NULL;
336 	}
337 	sctp_clog.x.lock.inp = (void *)inp;
338 	if (stcb) {
339 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
340 	} else {
341 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
342 	}
343 	if (inp) {
344 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
345 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
346 	} else {
347 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
348 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
349 	}
350 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
351 	if (inp && (inp->sctp_socket)) {
352 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
353 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
354 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
355 	} else {
356 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
357 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
358 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
359 	}
360 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
361 	    SCTP_LOG_LOCK_EVENT,
362 	    from,
363 	    sctp_clog.x.misc.log1,
364 	    sctp_clog.x.misc.log2,
365 	    sctp_clog.x.misc.log3,
366 	    sctp_clog.x.misc.log4);
367 }
368 
369 void
370 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
371 {
372 	struct sctp_cwnd_log sctp_clog;
373 
374 	memset(&sctp_clog, 0, sizeof(sctp_clog));
375 	sctp_clog.x.cwnd.net = net;
376 	sctp_clog.x.cwnd.cwnd_new_value = error;
377 	sctp_clog.x.cwnd.inflight = net->flight_size;
378 	sctp_clog.x.cwnd.cwnd_augment = burst;
379 	if (stcb->asoc.send_queue_cnt > 255)
380 		sctp_clog.x.cwnd.cnt_in_send = 255;
381 	else
382 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
383 	if (stcb->asoc.stream_queue_cnt > 255)
384 		sctp_clog.x.cwnd.cnt_in_str = 255;
385 	else
386 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
387 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
388 	    SCTP_LOG_EVENT_MAXBURST,
389 	    from,
390 	    sctp_clog.x.misc.log1,
391 	    sctp_clog.x.misc.log2,
392 	    sctp_clog.x.misc.log3,
393 	    sctp_clog.x.misc.log4);
394 }
395 
396 void
397 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
398 {
399 	struct sctp_cwnd_log sctp_clog;
400 
401 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
402 	sctp_clog.x.rwnd.send_size = snd_size;
403 	sctp_clog.x.rwnd.overhead = overhead;
404 	sctp_clog.x.rwnd.new_rwnd = 0;
405 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
406 	    SCTP_LOG_EVENT_RWND,
407 	    from,
408 	    sctp_clog.x.misc.log1,
409 	    sctp_clog.x.misc.log2,
410 	    sctp_clog.x.misc.log3,
411 	    sctp_clog.x.misc.log4);
412 }
413 
414 void
415 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
416 {
417 	struct sctp_cwnd_log sctp_clog;
418 
419 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
420 	sctp_clog.x.rwnd.send_size = flight_size;
421 	sctp_clog.x.rwnd.overhead = overhead;
422 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
423 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
424 	    SCTP_LOG_EVENT_RWND,
425 	    from,
426 	    sctp_clog.x.misc.log1,
427 	    sctp_clog.x.misc.log2,
428 	    sctp_clog.x.misc.log3,
429 	    sctp_clog.x.misc.log4);
430 }
431 
432 #ifdef SCTP_MBCNT_LOGGING
433 static void
434 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
435 {
436 	struct sctp_cwnd_log sctp_clog;
437 
438 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
439 	sctp_clog.x.mbcnt.size_change = book;
440 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
441 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
442 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
443 	    SCTP_LOG_EVENT_MBCNT,
444 	    from,
445 	    sctp_clog.x.misc.log1,
446 	    sctp_clog.x.misc.log2,
447 	    sctp_clog.x.misc.log3,
448 	    sctp_clog.x.misc.log4);
449 }
450 
451 #endif
452 
453 void
454 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
455 {
456 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
457 	    SCTP_LOG_MISC_EVENT,
458 	    from,
459 	    a, b, c, d);
460 }
461 
462 void
463 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
464 {
465 	struct sctp_cwnd_log sctp_clog;
466 
467 	sctp_clog.x.wake.stcb = (void *)stcb;
468 	sctp_clog.x.wake.wake_cnt = wake_cnt;
469 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
470 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
471 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
472 
473 	if (stcb->asoc.stream_queue_cnt < 0xff)
474 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
475 	else
476 		sctp_clog.x.wake.stream_qcnt = 0xff;
477 
478 	if (stcb->asoc.chunks_on_out_queue < 0xff)
479 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
480 	else
481 		sctp_clog.x.wake.chunks_on_oque = 0xff;
482 
483 	sctp_clog.x.wake.sctpflags = 0;
484 	/* set in the deferred mode stuff */
485 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
486 		sctp_clog.x.wake.sctpflags |= 1;
487 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
488 		sctp_clog.x.wake.sctpflags |= 2;
489 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
490 		sctp_clog.x.wake.sctpflags |= 4;
491 	/* what about the sb */
492 	if (stcb->sctp_socket) {
493 		struct socket *so = stcb->sctp_socket;
494 
495 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
496 	} else {
497 		sctp_clog.x.wake.sbflags = 0xff;
498 	}
499 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
500 	    SCTP_LOG_EVENT_WAKE,
501 	    from,
502 	    sctp_clog.x.misc.log1,
503 	    sctp_clog.x.misc.log2,
504 	    sctp_clog.x.misc.log3,
505 	    sctp_clog.x.misc.log4);
506 }
507 
508 void
509 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
510 {
511 	struct sctp_cwnd_log sctp_clog;
512 
513 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
514 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
515 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
516 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
517 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
518 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
519 	sctp_clog.x.blk.sndlen = (uint32_t) sendlen;
520 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
521 	    SCTP_LOG_EVENT_BLOCK,
522 	    from,
523 	    sctp_clog.x.misc.log1,
524 	    sctp_clog.x.misc.log2,
525 	    sctp_clog.x.misc.log3,
526 	    sctp_clog.x.misc.log4);
527 }
528 
529 int
530 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
531 {
532 	/* May need to fix this if ktrdump does not work */
533 	return (0);
534 }
535 
536 #ifdef SCTP_AUDITING_ENABLED
537 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
538 static int sctp_audit_indx = 0;
539 
540 static
541 void
542 sctp_print_audit_report(void)
543 {
544 	int i;
545 	int cnt;
546 
547 	cnt = 0;
548 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
549 		if ((sctp_audit_data[i][0] == 0xe0) &&
550 		    (sctp_audit_data[i][1] == 0x01)) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if (sctp_audit_data[i][0] == 0xf0) {
554 			cnt = 0;
555 			SCTP_PRINTF("\n");
556 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
557 		    (sctp_audit_data[i][1] == 0x01)) {
558 			SCTP_PRINTF("\n");
559 			cnt = 0;
560 		}
561 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
562 		    (uint32_t) sctp_audit_data[i][1]);
563 		cnt++;
564 		if ((cnt % 14) == 0)
565 			SCTP_PRINTF("\n");
566 	}
567 	for (i = 0; i < sctp_audit_indx; i++) {
568 		if ((sctp_audit_data[i][0] == 0xe0) &&
569 		    (sctp_audit_data[i][1] == 0x01)) {
570 			cnt = 0;
571 			SCTP_PRINTF("\n");
572 		} else if (sctp_audit_data[i][0] == 0xf0) {
573 			cnt = 0;
574 			SCTP_PRINTF("\n");
575 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
576 		    (sctp_audit_data[i][1] == 0x01)) {
577 			SCTP_PRINTF("\n");
578 			cnt = 0;
579 		}
580 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
581 		    (uint32_t) sctp_audit_data[i][1]);
582 		cnt++;
583 		if ((cnt % 14) == 0)
584 			SCTP_PRINTF("\n");
585 	}
586 	SCTP_PRINTF("\n");
587 }
588 
589 void
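/*
 * Cross-check the association's cached counters (retransmit count,
 * total flight, flight count and per-net flight sizes) against the
 * sent queue, correcting any mismatch and recording it in the
 * circular sctp_audit_data[] buffer.
 */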
590 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
591     struct sctp_nets *net)
592 {
593 	int resend_cnt, tot_out, rep, tot_book_cnt;
594 	struct sctp_nets *lnet;
595 	struct sctp_tmit_chunk *chk;
596 
597 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
598 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
599 	sctp_audit_indx++;
600 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
601 		sctp_audit_indx = 0;
602 	}
603 	if (inp == NULL) {
604 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
605 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
606 		sctp_audit_indx++;
607 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
608 			sctp_audit_indx = 0;
609 		}
610 		return;
611 	}
612 	if (stcb == NULL) {
613 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
614 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
615 		sctp_audit_indx++;
616 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
617 			sctp_audit_indx = 0;
618 		}
619 		return;
620 	}
621 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
622 	sctp_audit_data[sctp_audit_indx][1] =
623 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
624 	sctp_audit_indx++;
625 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
626 		sctp_audit_indx = 0;
627 	}
628 	rep = 0;
629 	tot_book_cnt = 0;
630 	resend_cnt = tot_out = 0;
631 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
632 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
633 			resend_cnt++;
634 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
635 			tot_out += chk->book_size;
636 			tot_book_cnt++;
637 		}
638 	}
639 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
640 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
641 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
642 		sctp_audit_indx++;
643 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
644 			sctp_audit_indx = 0;
645 		}
646 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
647 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
648 		rep = 1;
649 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
650 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
651 		sctp_audit_data[sctp_audit_indx][1] =
652 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
653 		sctp_audit_indx++;
654 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
655 			sctp_audit_indx = 0;
656 		}
657 	}
658 	if (tot_out != stcb->asoc.total_flight) {
659 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
660 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
661 		sctp_audit_indx++;
662 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
663 			sctp_audit_indx = 0;
664 		}
665 		rep = 1;
666 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
667 		    (int)stcb->asoc.total_flight);
668 		stcb->asoc.total_flight = tot_out;
669 	}
670 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
671 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
672 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
673 		sctp_audit_indx++;
674 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
675 			sctp_audit_indx = 0;
676 		}
677 		rep = 1;
678 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
679 
680 		stcb->asoc.total_flight_count = tot_book_cnt;
681 	}
682 	tot_out = 0;
683 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
684 		tot_out += lnet->flight_size;
685 	}
686 	if (tot_out != stcb->asoc.total_flight) {
687 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
688 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
689 		sctp_audit_indx++;
690 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
691 			sctp_audit_indx = 0;
692 		}
693 		rep = 1;
694 		SCTP_PRINTF("real flight:%d net total was %d\n",
695 		    stcb->asoc.total_flight, tot_out);
696 		/* now corrective action */
697 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
698 
699 			tot_out = 0;
700 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
701 				if ((chk->whoTo == lnet) &&
702 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
703 					tot_out += chk->book_size;
704 				}
705 			}
706 			if (lnet->flight_size != tot_out) {
707 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
708 				    (void *)lnet, lnet->flight_size,
709 				    tot_out);
710 				lnet->flight_size = tot_out;
711 			}
712 		}
713 	}
714 	if (rep) {
715 		sctp_print_audit_report();
716 	}
717 }
718 
719 void
720 sctp_audit_log(uint8_t ev, uint8_t fd)
721 {
722 
723 	sctp_audit_data[sctp_audit_indx][0] = ev;
724 	sctp_audit_data[sctp_audit_indx][1] = fd;
725 	sctp_audit_indx++;
726 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
727 		sctp_audit_indx = 0;
728 	}
729 }
730 
731 #endif
732 
733 /*
734  * sctp_stop_timers_for_shutdown() should be called
735  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
736  * state to make sure that all timers are stopped.
737  */
738 void
739 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
740 {
741 	struct sctp_association *asoc;
742 	struct sctp_nets *net;
743 
744 	asoc = &stcb->asoc;
745 
746 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
747 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
748 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
749 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
750 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
751 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
752 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
753 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
754 	}
755 }
756 
757 /*
758  * A list of sizes based on typical MTUs, used only if the next hop
759  * size is not returned.
760  */
761 static uint32_t sctp_mtu_sizes[] = {
762 	68,
763 	296,
764 	508,
765 	512,
766 	544,
767 	576,
768 	1006,
769 	1492,
770 	1500,
771 	1536,
772 	2002,
773 	2048,
774 	4352,
775 	4464,
776 	8166,
777 	17914,
778 	32000,
779 	65535
780 };
781 
782 /*
783  * Return the largest MTU smaller than val. If there is no
784  * entry, just return val.
785  */
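/*
 * For example, sctp_get_prev_mtu(1500) returns 1492, while values at
 * or below 68 are returned unchanged.
 */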
786 uint32_t
787 sctp_get_prev_mtu(uint32_t val)
788 {
789 	uint32_t i;
790 
791 	if (val <= sctp_mtu_sizes[0]) {
792 		return (val);
793 	}
794 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
795 		if (val <= sctp_mtu_sizes[i]) {
796 			break;
797 		}
798 	}
799 	return (sctp_mtu_sizes[i - 1]);
800 }
801 
802 /*
803  * Return the smallest MTU larger than val. If there is no
804  * entry, just return val.
805  */
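/*
 * For example, sctp_get_next_mtu(1500) returns 1536, while values of
 * 65535 or more are returned unchanged.
 */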
806 uint32_t
807 sctp_get_next_mtu(uint32_t val)
808 {
809 	/* select another MTU that is just bigger than this one */
810 	uint32_t i;
811 
812 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
813 		if (val < sctp_mtu_sizes[i]) {
814 			return (sctp_mtu_sizes[i]);
815 		}
816 	}
817 	return (val);
818 }
819 
820 void
821 sctp_fill_random_store(struct sctp_pcb *m)
822 {
823 	/*
824 	 * Here we use MD5/SHA-1 to hash our good random numbers together
825 	 * with our counter. The result becomes our new good random numbers,
826 	 * which we then set up to hand out. Note that we do no locking to
827 	 * protect this; that is fine, since competing callers only stir
828 	 * more gobbledygook into the random store, which is what we want.
829 	 * There is a danger that two callers will use the same random
830 	 * numbers, but that's fine too since that is random as well :->
831 	 */
832 	m->store_at = 0;
833 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
834 	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
835 	    sizeof(m->random_counter), (uint8_t *) m->random_store);
836 	m->random_counter++;
837 }
838 
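/*
 * Hand out 32 bits at a time from the precomputed random_store,
 * advancing store_at with an atomic compare-and-swap and refilling
 * the store via sctp_fill_random_store() when it wraps.
 */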
839 uint32_t
840 sctp_select_initial_TSN(struct sctp_pcb *inp)
841 {
842 	/*
843 	 * A true implementation should use a random selection process to
844 	 * get the initial transmission sequence number (TSN), using RFC
845 	 * 1750 as a good guideline.
846 	 */
847 	uint32_t x, *xp;
848 	uint8_t *p;
849 	int store_at, new_store;
850 
851 	if (inp->initial_sequence_debug != 0) {
852 		uint32_t ret;
853 
854 		ret = inp->initial_sequence_debug;
855 		inp->initial_sequence_debug++;
856 		return (ret);
857 	}
858 retry:
859 	store_at = inp->store_at;
860 	new_store = store_at + sizeof(uint32_t);
861 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
862 		new_store = 0;
863 	}
864 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
865 		goto retry;
866 	}
867 	if (new_store == 0) {
868 		/* Refill the random store */
869 		sctp_fill_random_store(inp);
870 	}
871 	p = &inp->random_store[store_at];
872 	xp = (uint32_t *) p;
873 	x = *xp;
874 	return (x);
875 }
876 
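/*
 * Pick a non-zero verification tag; when 'check' is set, keep drawing
 * candidates until sctp_is_vtag_good() accepts one.
 */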
877 uint32_t
878 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
879 {
880 	uint32_t x;
881 	struct timeval now;
882 
883 	if (check) {
884 		(void)SCTP_GETTIME_TIMEVAL(&now);
885 	}
886 	for (;;) {
887 		x = sctp_select_initial_TSN(&inp->sctp_ep);
888 		if (x == 0) {
889 			/* we never use 0 */
890 			continue;
891 		}
892 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
893 			break;
894 		}
895 	}
896 	return (x);
897 }
898 
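/*
 * Translate the kernel's internal association state bits into the
 * user-visible SCTP_* association states.
 */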
899 int32_t
900 sctp_map_assoc_state(int kernel_state)
901 {
902 	int32_t user_state;
903 
904 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
905 		user_state = SCTP_CLOSED;
906 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
907 		user_state = SCTP_SHUTDOWN_PENDING;
908 	} else {
909 		switch (kernel_state & SCTP_STATE_MASK) {
910 		case SCTP_STATE_EMPTY:
911 			user_state = SCTP_CLOSED;
912 			break;
913 		case SCTP_STATE_INUSE:
914 			user_state = SCTP_CLOSED;
915 			break;
916 		case SCTP_STATE_COOKIE_WAIT:
917 			user_state = SCTP_COOKIE_WAIT;
918 			break;
919 		case SCTP_STATE_COOKIE_ECHOED:
920 			user_state = SCTP_COOKIE_ECHOED;
921 			break;
922 		case SCTP_STATE_OPEN:
923 			user_state = SCTP_ESTABLISHED;
924 			break;
925 		case SCTP_STATE_SHUTDOWN_SENT:
926 			user_state = SCTP_SHUTDOWN_SENT;
927 			break;
928 		case SCTP_STATE_SHUTDOWN_RECEIVED:
929 			user_state = SCTP_SHUTDOWN_RECEIVED;
930 			break;
931 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
932 			user_state = SCTP_SHUTDOWN_ACK_SENT;
933 			break;
934 		default:
935 			user_state = SCTP_CLOSED;
936 			break;
937 		}
938 	}
939 	return (user_state);
940 }
941 
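/*
 * Initialize a newly allocated association from the endpoint's
 * defaults: timers, supported extensions, verification tags,
 * congestion control and stream scheduling modules, the outgoing
 * streams and the mapping arrays.  Returns 0 or ENOMEM.
 */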
942 int
943 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
944     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
945 {
946 	struct sctp_association *asoc;
947 
948 	/*
949 	 * Anything set to zero is taken care of by the allocation routine's
950 	 * bzero
951 	 */
952 
953 	/*
954 	 * Up front, select what scoping to apply to the addresses I tell my
955 	 * peer. Not sure what to do with these right now; we will need to
956 	 * come up with a way to set them. We may need to pass them through
957 	 * from the caller in the sctp_aloc_assoc() function.
958 	 */
959 	int i;
960 
961 #if defined(SCTP_DETAILED_STR_STATS)
962 	int j;
963 
964 #endif
965 
966 	asoc = &stcb->asoc;
967 	/* init all variables to a known value. */
968 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
969 	asoc->max_burst = inp->sctp_ep.max_burst;
970 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
971 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
972 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
973 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
974 	asoc->ecn_supported = inp->ecn_supported;
975 	asoc->prsctp_supported = inp->prsctp_supported;
976 	asoc->idata_supported = inp->idata_supported;
977 	asoc->auth_supported = inp->auth_supported;
978 	asoc->asconf_supported = inp->asconf_supported;
979 	asoc->reconfig_supported = inp->reconfig_supported;
980 	asoc->nrsack_supported = inp->nrsack_supported;
981 	asoc->pktdrop_supported = inp->pktdrop_supported;
982 	asoc->idata_supported = inp->idata_supported;
983 	asoc->sctp_cmt_pf = (uint8_t) 0;
984 	asoc->sctp_frag_point = inp->sctp_frag_point;
985 	asoc->sctp_features = inp->sctp_features;
986 	asoc->default_dscp = inp->sctp_ep.default_dscp;
987 	asoc->max_cwnd = inp->max_cwnd;
988 #ifdef INET6
989 	if (inp->sctp_ep.default_flowlabel) {
990 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
991 	} else {
992 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
993 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
994 			asoc->default_flowlabel &= 0x000fffff;
995 			asoc->default_flowlabel |= 0x80000000;
996 		} else {
997 			asoc->default_flowlabel = 0;
998 		}
999 	}
1000 #endif
1001 	asoc->sb_send_resv = 0;
1002 	if (override_tag) {
1003 		asoc->my_vtag = override_tag;
1004 	} else {
1005 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1006 	}
1007 	/* Get the nonce tags */
1008 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1009 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1010 	asoc->vrf_id = vrf_id;
1011 
1012 #ifdef SCTP_ASOCLOG_OF_TSNS
1013 	asoc->tsn_in_at = 0;
1014 	asoc->tsn_out_at = 0;
1015 	asoc->tsn_in_wrapped = 0;
1016 	asoc->tsn_out_wrapped = 0;
1017 	asoc->cumack_log_at = 0;
1018 	asoc->cumack_log_atsnt = 0;
1019 #endif
1020 #ifdef SCTP_FS_SPEC_LOG
1021 	asoc->fs_index = 0;
1022 #endif
1023 	asoc->refcnt = 0;
1024 	asoc->assoc_up_sent = 0;
1025 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1026 	    sctp_select_initial_TSN(&inp->sctp_ep);
1027 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1028 	/* we are optimistic here */
1029 	asoc->peer_supports_nat = 0;
1030 	asoc->sent_queue_retran_cnt = 0;
1031 
1032 	/* for CMT */
1033 	asoc->last_net_cmt_send_started = NULL;
1034 
1035 	/* This will need to be adjusted */
1036 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1037 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1038 	asoc->asconf_seq_in = asoc->last_acked_seq;
1039 
1040 	/* here we are different, we hold the next one we expect */
1041 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1042 
1043 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1044 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1045 
1046 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1047 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1048 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1049 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1050 	asoc->free_chunk_cnt = 0;
1051 
1052 	asoc->iam_blocking = 0;
1053 	asoc->context = inp->sctp_context;
1054 	asoc->local_strreset_support = inp->local_strreset_support;
1055 	asoc->def_send = inp->def_send;
1056 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1057 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1058 	asoc->pr_sctp_cnt = 0;
1059 	asoc->total_output_queue_size = 0;
1060 
1061 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1062 		asoc->scope.ipv6_addr_legal = 1;
1063 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1064 			asoc->scope.ipv4_addr_legal = 1;
1065 		} else {
1066 			asoc->scope.ipv4_addr_legal = 0;
1067 		}
1068 	} else {
1069 		asoc->scope.ipv6_addr_legal = 0;
1070 		asoc->scope.ipv4_addr_legal = 1;
1071 	}
1072 
1073 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1074 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1075 
1076 	asoc->smallest_mtu = inp->sctp_frag_point;
1077 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1078 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1079 
1080 	asoc->stream_locked_on = 0;
1081 	asoc->ecn_echo_cnt_onq = 0;
1082 	asoc->stream_locked = 0;
1083 
1084 	asoc->send_sack = 1;
1085 
1086 	LIST_INIT(&asoc->sctp_restricted_addrs);
1087 
1088 	TAILQ_INIT(&asoc->nets);
1089 	TAILQ_INIT(&asoc->pending_reply_queue);
1090 	TAILQ_INIT(&asoc->asconf_ack_sent);
1091 	/* Set up to fill the HB random cache at the first HB */
1092 	asoc->hb_random_idx = 4;
1093 
1094 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1095 
1096 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1097 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1098 
1099 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1100 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1101 
1102 	/*
1103 	 * Now the stream parameters, here we allocate space for all streams
1104 	 * that we request by default.
1105 	 */
1106 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1107 	    o_strms;
1108 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1109 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1110 	    SCTP_M_STRMO);
1111 	if (asoc->strmout == NULL) {
1112 		/* big trouble no memory */
1113 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1114 		return (ENOMEM);
1115 	}
1116 	for (i = 0; i < asoc->streamoutcnt; i++) {
1117 		/*
1118 		 * The inbound side must be set to 0xffff. Also NOTE: when we
1119 		 * get the INIT-ACK back (for the INIT sender) we MUST reduce
1120 		 * the count (streamoutcnt), but first check whether we sent to
1121 		 * any of the upper streams that were dropped (if some were).
1122 		 * Those that were dropped must be reported to the upper layer
1123 		 * as failed to send.
1124 		 */
1125 		asoc->strmout[i].next_mid_ordered = 0;
1126 		asoc->strmout[i].next_mid_unordered = 0;
1127 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1128 		asoc->strmout[i].chunks_on_queues = 0;
1129 #if defined(SCTP_DETAILED_STR_STATS)
1130 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1131 			asoc->strmout[i].abandoned_sent[j] = 0;
1132 			asoc->strmout[i].abandoned_unsent[j] = 0;
1133 		}
1134 #else
1135 		asoc->strmout[i].abandoned_sent[0] = 0;
1136 		asoc->strmout[i].abandoned_unsent[0] = 0;
1137 #endif
1138 		asoc->strmout[i].stream_no = i;
1139 		asoc->strmout[i].last_msg_incomplete = 0;
1140 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1141 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1142 	}
1143 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1144 
1145 	/* Now the mapping array */
1146 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1147 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1148 	    SCTP_M_MAP);
1149 	if (asoc->mapping_array == NULL) {
1150 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1151 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1152 		return (ENOMEM);
1153 	}
1154 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1155 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1156 	    SCTP_M_MAP);
1157 	if (asoc->nr_mapping_array == NULL) {
1158 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1159 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1160 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1161 		return (ENOMEM);
1162 	}
1163 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1164 
1165 	/* Now the init of the other outqueues */
1166 	TAILQ_INIT(&asoc->free_chunks);
1167 	TAILQ_INIT(&asoc->control_send_queue);
1168 	TAILQ_INIT(&asoc->asconf_send_queue);
1169 	TAILQ_INIT(&asoc->send_queue);
1170 	TAILQ_INIT(&asoc->sent_queue);
1171 	TAILQ_INIT(&asoc->resetHead);
1172 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1173 	TAILQ_INIT(&asoc->asconf_queue);
1174 	/* authentication fields */
1175 	asoc->authinfo.random = NULL;
1176 	asoc->authinfo.active_keyid = 0;
1177 	asoc->authinfo.assoc_key = NULL;
1178 	asoc->authinfo.assoc_keyid = 0;
1179 	asoc->authinfo.recv_key = NULL;
1180 	asoc->authinfo.recv_keyid = 0;
1181 	LIST_INIT(&asoc->shared_keys);
1182 	asoc->marked_retrans = 0;
1183 	asoc->port = inp->sctp_ep.port;
1184 	asoc->timoinit = 0;
1185 	asoc->timodata = 0;
1186 	asoc->timosack = 0;
1187 	asoc->timoshutdown = 0;
1188 	asoc->timoheartbeat = 0;
1189 	asoc->timocookie = 0;
1190 	asoc->timoshutdownack = 0;
1191 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1192 	asoc->discontinuity_time = asoc->start_time;
1193 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1194 		asoc->abandoned_unsent[i] = 0;
1195 		asoc->abandoned_sent[i] = 0;
1196 	}
1197 	/*
1198 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1199 	 * freed later when the association is freed.
1200 	 */
1201 	return (0);
1202 }
1203 
1204 void
1205 sctp_print_mapping_array(struct sctp_association *asoc)
1206 {
1207 	unsigned int i, limit;
1208 
1209 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1210 	    asoc->mapping_array_size,
1211 	    asoc->mapping_array_base_tsn,
1212 	    asoc->cumulative_tsn,
1213 	    asoc->highest_tsn_inside_map,
1214 	    asoc->highest_tsn_inside_nr_map);
1215 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1216 		if (asoc->mapping_array[limit - 1] != 0) {
1217 			break;
1218 		}
1219 	}
1220 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1221 	for (i = 0; i < limit; i++) {
1222 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1223 	}
1224 	if (limit % 16)
1225 		SCTP_PRINTF("\n");
1226 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1227 		if (asoc->nr_mapping_array[limit - 1]) {
1228 			break;
1229 		}
1230 	}
1231 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1232 	for (i = 0; i < limit; i++) {
1233 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1234 	}
1235 	if (limit % 16)
1236 		SCTP_PRINTF("\n");
1237 }
1238 
1239 int
1240 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1241 {
1242 	/* mapping array needs to grow */
1243 	uint8_t *new_array1, *new_array2;
1244 	uint32_t new_size;
1245 
1246 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1247 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1248 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1249 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1250 		/* can't get more, forget it */
1251 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1252 		if (new_array1) {
1253 			SCTP_FREE(new_array1, SCTP_M_MAP);
1254 		}
1255 		if (new_array2) {
1256 			SCTP_FREE(new_array2, SCTP_M_MAP);
1257 		}
1258 		return (-1);
1259 	}
1260 	memset(new_array1, 0, new_size);
1261 	memset(new_array2, 0, new_size);
1262 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1263 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1264 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1265 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1266 	asoc->mapping_array = new_array1;
1267 	asoc->nr_mapping_array = new_array2;
1268 	asoc->mapping_array_size = new_size;
1269 	return (0);
1270 }
1271 
1272 
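/*
 * Walk the endpoint list and, within each endpoint matching the
 * iterator's flags and features, every association in the requested
 * state, invoking the caller-supplied callbacks.  The locks are
 * periodically dropped and retaken so a long run does not starve
 * other lock users.
 */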
1273 static void
1274 sctp_iterator_work(struct sctp_iterator *it)
1275 {
1276 	int iteration_count = 0;
1277 	int inp_skip = 0;
1278 	int first_in = 1;
1279 	struct sctp_inpcb *tinp;
1280 
1281 	SCTP_INP_INFO_RLOCK();
1282 	SCTP_ITERATOR_LOCK();
1283 	sctp_it_ctl.cur_it = it;
1284 	if (it->inp) {
1285 		SCTP_INP_RLOCK(it->inp);
1286 		SCTP_INP_DECR_REF(it->inp);
1287 	}
1288 	if (it->inp == NULL) {
1289 		/* iterator is complete */
1290 done_with_iterator:
1291 		sctp_it_ctl.cur_it = NULL;
1292 		SCTP_ITERATOR_UNLOCK();
1293 		SCTP_INP_INFO_RUNLOCK();
1294 		if (it->function_atend != NULL) {
1295 			(*it->function_atend) (it->pointer, it->val);
1296 		}
1297 		SCTP_FREE(it, SCTP_M_ITER);
1298 		return;
1299 	}
1300 select_a_new_ep:
1301 	if (first_in) {
1302 		first_in = 0;
1303 	} else {
1304 		SCTP_INP_RLOCK(it->inp);
1305 	}
1306 	while (((it->pcb_flags) &&
1307 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1308 	    ((it->pcb_features) &&
1309 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1310 		/* endpoint flags or features don't match, so keep looking */
1311 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1312 			SCTP_INP_RUNLOCK(it->inp);
1313 			goto done_with_iterator;
1314 		}
1315 		tinp = it->inp;
1316 		it->inp = LIST_NEXT(it->inp, sctp_list);
1317 		SCTP_INP_RUNLOCK(tinp);
1318 		if (it->inp == NULL) {
1319 			goto done_with_iterator;
1320 		}
1321 		SCTP_INP_RLOCK(it->inp);
1322 	}
1323 	/* now go through each assoc which is in the desired state */
1324 	if (it->done_current_ep == 0) {
1325 		if (it->function_inp != NULL)
1326 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1327 		it->done_current_ep = 1;
1328 	}
1329 	if (it->stcb == NULL) {
1330 		/* run the per instance function */
1331 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1332 	}
1333 	if ((inp_skip) || it->stcb == NULL) {
1334 		if (it->function_inp_end != NULL) {
1335 			inp_skip = (*it->function_inp_end) (it->inp,
1336 			    it->pointer,
1337 			    it->val);
1338 		}
1339 		SCTP_INP_RUNLOCK(it->inp);
1340 		goto no_stcb;
1341 	}
1342 	while (it->stcb) {
1343 		SCTP_TCB_LOCK(it->stcb);
1344 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1345 			/* not in the right state... keep looking */
1346 			SCTP_TCB_UNLOCK(it->stcb);
1347 			goto next_assoc;
1348 		}
1349 		/* see if we have limited out the iterator loop */
1350 		iteration_count++;
1351 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1352 			/* Pause to let others grab the lock */
1353 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1354 			SCTP_TCB_UNLOCK(it->stcb);
1355 			SCTP_INP_INCR_REF(it->inp);
1356 			SCTP_INP_RUNLOCK(it->inp);
1357 			SCTP_ITERATOR_UNLOCK();
1358 			SCTP_INP_INFO_RUNLOCK();
1359 			SCTP_INP_INFO_RLOCK();
1360 			SCTP_ITERATOR_LOCK();
1361 			if (sctp_it_ctl.iterator_flags) {
1362 				/* We won't be staying here */
1363 				SCTP_INP_DECR_REF(it->inp);
1364 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1365 				if (sctp_it_ctl.iterator_flags &
1366 				    SCTP_ITERATOR_STOP_CUR_IT) {
1367 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1368 					goto done_with_iterator;
1369 				}
1370 				if (sctp_it_ctl.iterator_flags &
1371 				    SCTP_ITERATOR_STOP_CUR_INP) {
1372 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1373 					goto no_stcb;
1374 				}
1375 				/* If we reach here huh? */
1376 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1377 				    sctp_it_ctl.iterator_flags);
1378 				sctp_it_ctl.iterator_flags = 0;
1379 			}
1380 			SCTP_INP_RLOCK(it->inp);
1381 			SCTP_INP_DECR_REF(it->inp);
1382 			SCTP_TCB_LOCK(it->stcb);
1383 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1384 			iteration_count = 0;
1385 		}
1386 		/* run function on this one */
1387 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1388 
1389 		/*
1390 		 * We lie here; it really needs to have its own type, but
1391 		 * first I must verify that this won't affect things :-0
1392 		 */
1393 		if (it->no_chunk_output == 0)
1394 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1395 
1396 		SCTP_TCB_UNLOCK(it->stcb);
1397 next_assoc:
1398 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1399 		if (it->stcb == NULL) {
1400 			/* Run last function */
1401 			if (it->function_inp_end != NULL) {
1402 				inp_skip = (*it->function_inp_end) (it->inp,
1403 				    it->pointer,
1404 				    it->val);
1405 			}
1406 		}
1407 	}
1408 	SCTP_INP_RUNLOCK(it->inp);
1409 no_stcb:
1410 	/* done with all assocs on this endpoint, move on to next endpoint */
1411 	it->done_current_ep = 0;
1412 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1413 		it->inp = NULL;
1414 	} else {
1415 		it->inp = LIST_NEXT(it->inp, sctp_list);
1416 	}
1417 	if (it->inp == NULL) {
1418 		goto done_with_iterator;
1419 	}
1420 	goto select_a_new_ep;
1421 }
1422 
1423 void
1424 sctp_iterator_worker(void)
1425 {
1426 	struct sctp_iterator *it, *nit;
1427 
1428 	/* This function is called with the WQ lock in place */
1429 
1430 	sctp_it_ctl.iterator_running = 1;
1431 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1432 		/* now let's work on this one */
1433 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1434 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1435 		CURVNET_SET(it->vn);
1436 		sctp_iterator_work(it);
1437 		CURVNET_RESTORE();
1438 		SCTP_IPI_ITERATOR_WQ_LOCK();
1439 		/* sa_ignore FREED_MEMORY */
1440 	}
1441 	sctp_it_ctl.iterator_running = 0;
1442 	return;
1443 }
1444 
1445 
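/*
 * Move the entries queued on the global address work queue onto an
 * asconf iterator and start an iterator run over all bound-all
 * endpoints; on failure the entries are freed or put back on the
 * queue.
 */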
1446 static void
1447 sctp_handle_addr_wq(void)
1448 {
1449 	/* deal with the ADDR wq from the rtsock calls */
1450 	struct sctp_laddr *wi, *nwi;
1451 	struct sctp_asconf_iterator *asc;
1452 
1453 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1454 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1455 	if (asc == NULL) {
1456 		/* Try later, no memory */
1457 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1458 		    (struct sctp_inpcb *)NULL,
1459 		    (struct sctp_tcb *)NULL,
1460 		    (struct sctp_nets *)NULL);
1461 		return;
1462 	}
1463 	LIST_INIT(&asc->list_of_work);
1464 	asc->cnt = 0;
1465 
1466 	SCTP_WQ_ADDR_LOCK();
1467 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1468 		LIST_REMOVE(wi, sctp_nxt_addr);
1469 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1470 		asc->cnt++;
1471 	}
1472 	SCTP_WQ_ADDR_UNLOCK();
1473 
1474 	if (asc->cnt == 0) {
1475 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1476 	} else {
1477 		int ret;
1478 
1479 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1480 		    sctp_asconf_iterator_stcb,
1481 		    NULL,	/* No ep end for boundall */
1482 		    SCTP_PCB_FLAGS_BOUNDALL,
1483 		    SCTP_PCB_ANY_FEATURES,
1484 		    SCTP_ASOC_ANY_STATE,
1485 		    (void *)asc, 0,
1486 		    sctp_asconf_iterator_end, NULL, 0);
1487 		if (ret) {
1488 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1489 			/*
1490 			 * Free it if we are stopping; otherwise put it back
1491 			 * on the addr_wq.
1492 			 */
1493 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1494 				sctp_asconf_iterator_end(asc, 0);
1495 			} else {
1496 				SCTP_WQ_ADDR_LOCK();
1497 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1498 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1499 				}
1500 				SCTP_WQ_ADDR_UNLOCK();
1501 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1502 			}
1503 		}
1504 	}
1505 }
1506 
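/*
 * Common callout handler for all SCTP timer types: validate the timer
 * and its endpoint/association, take the necessary references and
 * locks, then dispatch on tmr->type.
 */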
1507 void
1508 sctp_timeout_handler(void *t)
1509 {
1510 	struct sctp_inpcb *inp;
1511 	struct sctp_tcb *stcb;
1512 	struct sctp_nets *net;
1513 	struct sctp_timer *tmr;
1514 	struct mbuf *op_err;
1515 
1516 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1517 	struct socket *so;
1518 
1519 #endif
1520 	int did_output;
1521 	int type;
1522 
1523 	tmr = (struct sctp_timer *)t;
1524 	inp = (struct sctp_inpcb *)tmr->ep;
1525 	stcb = (struct sctp_tcb *)tmr->tcb;
1526 	net = (struct sctp_nets *)tmr->net;
1527 	CURVNET_SET((struct vnet *)tmr->vnet);
1528 	did_output = 1;
1529 
1530 #ifdef SCTP_AUDITING_ENABLED
1531 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1532 	sctp_auditing(3, inp, stcb, net);
1533 #endif
1534 
1535 	/* sanity checks... */
1536 	if (tmr->self != (void *)tmr) {
1537 		/*
1538 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1539 		 * (void *)tmr);
1540 		 */
1541 		CURVNET_RESTORE();
1542 		return;
1543 	}
1544 	tmr->stopped_from = 0xa001;
1545 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1546 		/*
1547 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1548 		 * tmr->type);
1549 		 */
1550 		CURVNET_RESTORE();
1551 		return;
1552 	}
1553 	tmr->stopped_from = 0xa002;
1554 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1555 		CURVNET_RESTORE();
1556 		return;
1557 	}
1558 	/* if this is an iterator timeout, get the struct and clear inp */
1559 	tmr->stopped_from = 0xa003;
1560 	if (inp) {
1561 		SCTP_INP_INCR_REF(inp);
1562 		if ((inp->sctp_socket == NULL) &&
1563 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1564 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1565 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1566 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1567 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1568 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1569 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1570 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1571 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1572 		    ) {
1573 			SCTP_INP_DECR_REF(inp);
1574 			CURVNET_RESTORE();
1575 			return;
1576 		}
1577 	}
1578 	tmr->stopped_from = 0xa004;
1579 	if (stcb) {
1580 		atomic_add_int(&stcb->asoc.refcnt, 1);
1581 		if (stcb->asoc.state == 0) {
1582 			atomic_add_int(&stcb->asoc.refcnt, -1);
1583 			if (inp) {
1584 				SCTP_INP_DECR_REF(inp);
1585 			}
1586 			CURVNET_RESTORE();
1587 			return;
1588 		}
1589 	}
1590 	type = tmr->type;
1591 	tmr->stopped_from = 0xa005;
1592 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1593 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1594 		if (inp) {
1595 			SCTP_INP_DECR_REF(inp);
1596 		}
1597 		if (stcb) {
1598 			atomic_add_int(&stcb->asoc.refcnt, -1);
1599 		}
1600 		CURVNET_RESTORE();
1601 		return;
1602 	}
1603 	tmr->stopped_from = 0xa006;
1604 
1605 	if (stcb) {
1606 		SCTP_TCB_LOCK(stcb);
1607 		atomic_add_int(&stcb->asoc.refcnt, -1);
1608 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1609 		    ((stcb->asoc.state == 0) ||
1610 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1611 			SCTP_TCB_UNLOCK(stcb);
1612 			if (inp) {
1613 				SCTP_INP_DECR_REF(inp);
1614 			}
1615 			CURVNET_RESTORE();
1616 			return;
1617 		}
1618 	}
1619 	/* record in stopped_from which timeout occurred */
1620 	tmr->stopped_from = type;
1621 
1622 	/* mark as being serviced now */
1623 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1624 		/*
1625 		 * Callout has been rescheduled.
1626 		 */
1627 		goto get_out;
1628 	}
1629 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1630 		/*
1631 		 * Not active, so no action.
1632 		 */
1633 		goto get_out;
1634 	}
1635 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1636 
1637 	/* call the handler for the appropriate timer type */
1638 	switch (type) {
1639 	case SCTP_TIMER_TYPE_ZERO_COPY:
1640 		if (inp == NULL) {
1641 			break;
1642 		}
1643 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1644 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1645 		}
1646 		break;
1647 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1648 		if (inp == NULL) {
1649 			break;
1650 		}
1651 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1652 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1653 		}
1654 		break;
1655 	case SCTP_TIMER_TYPE_ADDR_WQ:
1656 		sctp_handle_addr_wq();
1657 		break;
1658 	case SCTP_TIMER_TYPE_SEND:
1659 		if ((stcb == NULL) || (inp == NULL)) {
1660 			break;
1661 		}
1662 		SCTP_STAT_INCR(sctps_timodata);
1663 		stcb->asoc.timodata++;
1664 		stcb->asoc.num_send_timers_up--;
1665 		if (stcb->asoc.num_send_timers_up < 0) {
1666 			stcb->asoc.num_send_timers_up = 0;
1667 		}
1668 		SCTP_TCB_LOCK_ASSERT(stcb);
1669 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1670 			/* no need to unlock on tcb, it's gone */
1671 
1672 			goto out_decr;
1673 		}
1674 		SCTP_TCB_LOCK_ASSERT(stcb);
1675 #ifdef SCTP_AUDITING_ENABLED
1676 		sctp_auditing(4, inp, stcb, net);
1677 #endif
1678 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1679 		if ((stcb->asoc.num_send_timers_up == 0) &&
1680 		    (stcb->asoc.sent_queue_cnt > 0)) {
1681 			struct sctp_tmit_chunk *chk;
1682 
1683 			/*
1684 			 * Safeguard: if there are chunks on the sent queue
1685 			 * but no timers running, something is wrong, so we
1686 			 * start a timer on the first chunk of the sent
1687 			 * queue, on whatever net it is sent to.
1688 			 */
1689 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1690 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1691 			    chk->whoTo);
1692 		}
1693 		break;
1694 	case SCTP_TIMER_TYPE_INIT:
1695 		if ((stcb == NULL) || (inp == NULL)) {
1696 			break;
1697 		}
1698 		SCTP_STAT_INCR(sctps_timoinit);
1699 		stcb->asoc.timoinit++;
1700 		if (sctp_t1init_timer(inp, stcb, net)) {
1701 			/* no need to unlock on tcb, it's gone */
1702 			goto out_decr;
1703 		}
1704 		/* We do output but not here */
1705 		did_output = 0;
1706 		break;
1707 	case SCTP_TIMER_TYPE_RECV:
1708 		if ((stcb == NULL) || (inp == NULL)) {
1709 			break;
1710 		}
1711 		SCTP_STAT_INCR(sctps_timosack);
1712 		stcb->asoc.timosack++;
1713 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1714 #ifdef SCTP_AUDITING_ENABLED
1715 		sctp_auditing(4, inp, stcb, net);
1716 #endif
1717 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1718 		break;
1719 	case SCTP_TIMER_TYPE_SHUTDOWN:
1720 		if ((stcb == NULL) || (inp == NULL)) {
1721 			break;
1722 		}
1723 		if (sctp_shutdown_timer(inp, stcb, net)) {
1724 			/* no need to unlock on tcb, it's gone */
1725 			goto out_decr;
1726 		}
1727 		SCTP_STAT_INCR(sctps_timoshutdown);
1728 		stcb->asoc.timoshutdown++;
1729 #ifdef SCTP_AUDITING_ENABLED
1730 		sctp_auditing(4, inp, stcb, net);
1731 #endif
1732 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1733 		break;
1734 	case SCTP_TIMER_TYPE_HEARTBEAT:
1735 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1736 			break;
1737 		}
1738 		SCTP_STAT_INCR(sctps_timoheartbeat);
1739 		stcb->asoc.timoheartbeat++;
1740 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1741 			/* no need to unlock on tcb, it's gone */
1742 			goto out_decr;
1743 		}
1744 #ifdef SCTP_AUDITING_ENABLED
1745 		sctp_auditing(4, inp, stcb, net);
1746 #endif
1747 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1748 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1749 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1750 		}
1751 		break;
1752 	case SCTP_TIMER_TYPE_COOKIE:
1753 		if ((stcb == NULL) || (inp == NULL)) {
1754 			break;
1755 		}
1756 		if (sctp_cookie_timer(inp, stcb, net)) {
1757 			/* no need to unlock on tcb, it's gone */
1758 			goto out_decr;
1759 		}
1760 		SCTP_STAT_INCR(sctps_timocookie);
1761 		stcb->asoc.timocookie++;
1762 #ifdef SCTP_AUDITING_ENABLED
1763 		sctp_auditing(4, inp, stcb, net);
1764 #endif
1765 		/*
1766 		 * We consider the T3 and Cookie timers pretty much the same
1767 		 * with respect to the 'from' value passed to chunk_output.
1768 		 */
1769 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1770 		break;
1771 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1772 		{
1773 			struct timeval tv;
1774 			int i, secret;
1775 
1776 			if (inp == NULL) {
1777 				break;
1778 			}
1779 			SCTP_STAT_INCR(sctps_timosecret);
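			/*
			 * Rotate the cookie secret: record when the change
			 * happened, remember which secret was in use, advance
			 * the current secret number (wrapping at
			 * SCTP_HOW_MANY_SECRETS), refill the new slot with
			 * fresh random words, and re-arm this timer below.
			 */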
1780 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1781 			SCTP_INP_WLOCK(inp);
1782 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1783 			inp->sctp_ep.last_secret_number =
1784 			    inp->sctp_ep.current_secret_number;
1785 			inp->sctp_ep.current_secret_number++;
1786 			if (inp->sctp_ep.current_secret_number >=
1787 			    SCTP_HOW_MANY_SECRETS) {
1788 				inp->sctp_ep.current_secret_number = 0;
1789 			}
1790 			secret = (int)inp->sctp_ep.current_secret_number;
1791 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1792 				inp->sctp_ep.secret_key[secret][i] =
1793 				    sctp_select_initial_TSN(&inp->sctp_ep);
1794 			}
1795 			SCTP_INP_WUNLOCK(inp);
1796 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1797 		}
1798 		did_output = 0;
1799 		break;
1800 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1801 		if ((stcb == NULL) || (inp == NULL)) {
1802 			break;
1803 		}
1804 		SCTP_STAT_INCR(sctps_timopathmtu);
1805 		sctp_pathmtu_timer(inp, stcb, net);
1806 		did_output = 0;
1807 		break;
1808 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1809 		if ((stcb == NULL) || (inp == NULL)) {
1810 			break;
1811 		}
1812 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1813 			/* no need to unlock on tcb, it's gone */
1814 			goto out_decr;
1815 		}
1816 		SCTP_STAT_INCR(sctps_timoshutdownack);
1817 		stcb->asoc.timoshutdownack++;
1818 #ifdef SCTP_AUDITING_ENABLED
1819 		sctp_auditing(4, inp, stcb, net);
1820 #endif
1821 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1822 		break;
1823 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1824 		if ((stcb == NULL) || (inp == NULL)) {
1825 			break;
1826 		}
1827 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1828 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1829 		    "Shutdown guard timer expired");
1830 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1831 		/* no need to unlock on tcb, it's gone */
1832 		goto out_decr;
1833 
1834 	case SCTP_TIMER_TYPE_STRRESET:
1835 		if ((stcb == NULL) || (inp == NULL)) {
1836 			break;
1837 		}
1838 		if (sctp_strreset_timer(inp, stcb, net)) {
1839 			/* no need to unlock on tcb, it's gone */
1840 			goto out_decr;
1841 		}
1842 		SCTP_STAT_INCR(sctps_timostrmrst);
1843 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1844 		break;
1845 	case SCTP_TIMER_TYPE_ASCONF:
1846 		if ((stcb == NULL) || (inp == NULL)) {
1847 			break;
1848 		}
1849 		if (sctp_asconf_timer(inp, stcb, net)) {
1850 			/* no need to unlock on tcb, it's gone */
1851 			goto out_decr;
1852 		}
1853 		SCTP_STAT_INCR(sctps_timoasconf);
1854 #ifdef SCTP_AUDITING_ENABLED
1855 		sctp_auditing(4, inp, stcb, net);
1856 #endif
1857 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1858 		break;
1859 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1860 		if ((stcb == NULL) || (inp == NULL)) {
1861 			break;
1862 		}
1863 		sctp_delete_prim_timer(inp, stcb, net);
1864 		SCTP_STAT_INCR(sctps_timodelprim);
1865 		break;
1866 
1867 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1868 		if ((stcb == NULL) || (inp == NULL)) {
1869 			break;
1870 		}
1871 		SCTP_STAT_INCR(sctps_timoautoclose);
1872 		sctp_autoclose_timer(inp, stcb, net);
1873 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1874 		did_output = 0;
1875 		break;
1876 	case SCTP_TIMER_TYPE_ASOCKILL:
1877 		if ((stcb == NULL) || (inp == NULL)) {
1878 			break;
1879 		}
1880 		SCTP_STAT_INCR(sctps_timoassockill);
1881 		/* Can we free it yet? */
1882 		SCTP_INP_DECR_REF(inp);
1883 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1884 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1885 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1886 		so = SCTP_INP_SO(inp);
1887 		atomic_add_int(&stcb->asoc.refcnt, 1);
1888 		SCTP_TCB_UNLOCK(stcb);
1889 		SCTP_SOCKET_LOCK(so, 1);
1890 		SCTP_TCB_LOCK(stcb);
1891 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1892 #endif
1893 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1894 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1895 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1896 		SCTP_SOCKET_UNLOCK(so, 1);
1897 #endif
1898 		/*
1899 		 * free asoc always unlocks (or destroys) the lock, so prevent
1900 		 * a duplicate unlock or an unlock of a freed mutex :-0
1901 		 */
1902 		stcb = NULL;
1903 		goto out_no_decr;
1904 	case SCTP_TIMER_TYPE_INPKILL:
1905 		SCTP_STAT_INCR(sctps_timoinpkill);
1906 		if (inp == NULL) {
1907 			break;
1908 		}
1909 		/*
1910 		 * special case, take away our increment since WE are the
1911 		 * killer
1912 		 */
1913 		SCTP_INP_DECR_REF(inp);
1914 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1915 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1916 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1917 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1918 		inp = NULL;
1919 		goto out_no_decr;
1920 	default:
1921 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1922 		    type);
1923 		break;
1924 	}
1925 #ifdef SCTP_AUDITING_ENABLED
1926 	sctp_audit_log(0xF1, (uint8_t) type);
1927 	if (inp)
1928 		sctp_auditing(5, inp, stcb, net);
1929 #endif
1930 	if ((did_output) && stcb) {
1931 		/*
1932 		 * Now we need to clean up the control chunk chain if an
1933 		 * ECNE is on it. It must be marked as UNSENT again so the
1934 		 * next call will continue to send it until we get a CWR,
1935 		 * which removes it. It is, however, unlikely that we will
1936 		 * actually find an ECN echo on the chain.
1937 		 */
1938 		sctp_fix_ecn_echo(&stcb->asoc);
1939 	}
1940 get_out:
1941 	if (stcb) {
1942 		SCTP_TCB_UNLOCK(stcb);
1943 	}
1944 out_decr:
1945 	if (inp) {
1946 		SCTP_INP_DECR_REF(inp);
1947 	}
1948 out_no_decr:
1949 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
1950 	CURVNET_RESTORE();
1951 }
1952 
1953 void
1954 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1955     struct sctp_nets *net)
1956 {
1957 	uint32_t to_ticks;
1958 	struct sctp_timer *tmr;
1959 
1960 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1961 		return;
1962 
1963 	tmr = NULL;
1964 	if (stcb) {
1965 		SCTP_TCB_LOCK_ASSERT(stcb);
1966 	}
1967 	switch (t_type) {
1968 	case SCTP_TIMER_TYPE_ZERO_COPY:
1969 		tmr = &inp->sctp_ep.zero_copy_timer;
1970 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1971 		break;
1972 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1973 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1974 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1975 		break;
1976 	case SCTP_TIMER_TYPE_ADDR_WQ:
1977 		/* Only 1 tick away :-) */
1978 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1979 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1980 		break;
1981 	case SCTP_TIMER_TYPE_SEND:
1982 		/* Here we use the RTO timer */
1983 		{
1984 			int rto_val;
1985 
1986 			if ((stcb == NULL) || (net == NULL)) {
1987 				return;
1988 			}
1989 			tmr = &net->rxt_timer;
1990 			if (net->RTO == 0) {
1991 				rto_val = stcb->asoc.initial_rto;
1992 			} else {
1993 				rto_val = net->RTO;
1994 			}
1995 			to_ticks = MSEC_TO_TICKS(rto_val);
1996 		}
1997 		break;
1998 	case SCTP_TIMER_TYPE_INIT:
1999 		/*
2000 		 * Here we use the INIT timer default, usually about 1
2001 		 * minute.
2002 		 */
2003 		if ((stcb == NULL) || (net == NULL)) {
2004 			return;
2005 		}
2006 		tmr = &net->rxt_timer;
2007 		if (net->RTO == 0) {
2008 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2009 		} else {
2010 			to_ticks = MSEC_TO_TICKS(net->RTO);
2011 		}
2012 		break;
2013 	case SCTP_TIMER_TYPE_RECV:
2014 		/*
2015 		 * Here we use the Delayed-Ack timer value from the inp,
2016 		 * usually about 200 ms.
2017 		 */
2018 		if (stcb == NULL) {
2019 			return;
2020 		}
2021 		tmr = &stcb->asoc.dack_timer;
2022 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2023 		break;
2024 	case SCTP_TIMER_TYPE_SHUTDOWN:
2025 		/* Here we use the RTO of the destination. */
2026 		if ((stcb == NULL) || (net == NULL)) {
2027 			return;
2028 		}
2029 		if (net->RTO == 0) {
2030 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2031 		} else {
2032 			to_ticks = MSEC_TO_TICKS(net->RTO);
2033 		}
2034 		tmr = &net->rxt_timer;
2035 		break;
2036 	case SCTP_TIMER_TYPE_HEARTBEAT:
2037 		/*
2038 		 * The net is used here so that we can add in the RTO, even
2039 		 * though we use a different timer. We also add the HB delay
2040 		 * PLUS a random jitter.
2041 		 */
2042 		if ((stcb == NULL) || (net == NULL)) {
2043 			return;
2044 		} else {
2045 			uint32_t rndval;
2046 			uint32_t jitter;
2047 
2048 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2049 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2050 				return;
2051 			}
2052 			if (net->RTO == 0) {
2053 				to_ticks = stcb->asoc.initial_rto;
2054 			} else {
2055 				to_ticks = net->RTO;
2056 			}
2057 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2058 			jitter = rndval % to_ticks;
2059 			if (jitter >= (to_ticks >> 1)) {
2060 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2061 			} else {
2062 				to_ticks = to_ticks - jitter;
2063 			}
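			/*
			 * The net effect of the two branches above is
			 * to_ticks = RTO + (jitter - RTO/2), i.e. the RTO
			 * jittered by up to +/- 50%.  Illustrative numbers
			 * only: with RTO = 1000 ms, a jitter of 300 gives
			 * 700 ms and a jitter of 800 gives 1300 ms.
			 */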
2064 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2065 			    !(net->dest_state & SCTP_ADDR_PF)) {
2066 				to_ticks += net->heart_beat_delay;
2067 			}
2068 			/*
2069 			 * Now we must convert to_ticks, which is currently in
2070 			 * ms, into ticks.
2071 			 */
2072 			to_ticks = MSEC_TO_TICKS(to_ticks);
2073 			tmr = &net->hb_timer;
2074 		}
2075 		break;
2076 	case SCTP_TIMER_TYPE_COOKIE:
2077 		/*
2078 		 * Here we can use the RTO timer from the network since one
2079 		 * RTT was complete. If a retransmission happened then we
2080 		 * will be using the initial RTO value.
2081 		 */
2082 		if ((stcb == NULL) || (net == NULL)) {
2083 			return;
2084 		}
2085 		if (net->RTO == 0) {
2086 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2087 		} else {
2088 			to_ticks = MSEC_TO_TICKS(net->RTO);
2089 		}
2090 		tmr = &net->rxt_timer;
2091 		break;
2092 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2093 		/*
2094 		 * Nothing needed but the endpoint here; usually about 60
2095 		 * minutes.
2096 		 */
2097 		tmr = &inp->sctp_ep.signature_change;
2098 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2099 		break;
2100 	case SCTP_TIMER_TYPE_ASOCKILL:
2101 		if (stcb == NULL) {
2102 			return;
2103 		}
2104 		tmr = &stcb->asoc.strreset_timer;
2105 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2106 		break;
2107 	case SCTP_TIMER_TYPE_INPKILL:
2108 		/*
2109 		 * The inp is set up to die. We re-use the signature_change
2110 		 * timer since that has stopped and we are in the GONE
2111 		 * state.
2112 		 */
2113 		tmr = &inp->sctp_ep.signature_change;
2114 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2115 		break;
2116 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2117 		/*
2118 		 * Here we use the value found in the EP for PMTU, usually
2119 		 * about 10 minutes.
2120 		 */
2121 		if ((stcb == NULL) || (net == NULL)) {
2122 			return;
2123 		}
2124 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2125 			return;
2126 		}
2127 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2128 		tmr = &net->pmtu_timer;
2129 		break;
2130 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2131 		/* Here we use the RTO of the destination */
2132 		if ((stcb == NULL) || (net == NULL)) {
2133 			return;
2134 		}
2135 		if (net->RTO == 0) {
2136 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2137 		} else {
2138 			to_ticks = MSEC_TO_TICKS(net->RTO);
2139 		}
2140 		tmr = &net->rxt_timer;
2141 		break;
2142 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2143 		/*
2144 		 * Here we use the endpoint's shutdown guard timer, usually
2145 		 * about 3 minutes.
2146 		 */
2147 		if (stcb == NULL) {
2148 			return;
2149 		}
2150 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2151 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2152 		} else {
2153 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2154 		}
2155 		tmr = &stcb->asoc.shut_guard_timer;
2156 		break;
2157 	case SCTP_TIMER_TYPE_STRRESET:
2158 		/*
2159 		 * Here the timer comes from the stcb but its value is from
2160 		 * the net's RTO.
2161 		 */
2162 		if ((stcb == NULL) || (net == NULL)) {
2163 			return;
2164 		}
2165 		if (net->RTO == 0) {
2166 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2167 		} else {
2168 			to_ticks = MSEC_TO_TICKS(net->RTO);
2169 		}
2170 		tmr = &stcb->asoc.strreset_timer;
2171 		break;
2172 	case SCTP_TIMER_TYPE_ASCONF:
2173 		/*
2174 		 * Here the timer comes from the stcb but its value is from
2175 		 * the net's RTO.
2176 		 */
2177 		if ((stcb == NULL) || (net == NULL)) {
2178 			return;
2179 		}
2180 		if (net->RTO == 0) {
2181 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2182 		} else {
2183 			to_ticks = MSEC_TO_TICKS(net->RTO);
2184 		}
2185 		tmr = &stcb->asoc.asconf_timer;
2186 		break;
2187 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2188 		if ((stcb == NULL) || (net != NULL)) {
2189 			return;
2190 		}
2191 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2192 		tmr = &stcb->asoc.delete_prim_timer;
2193 		break;
2194 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2195 		if (stcb == NULL) {
2196 			return;
2197 		}
2198 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2199 			/*
2200 			 * Really an error since stcb is NOT set to
2201 			 * autoclose
2202 			 */
2203 			return;
2204 		}
2205 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2206 		tmr = &stcb->asoc.autoclose_timer;
2207 		break;
2208 	default:
2209 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2210 		    __func__, t_type);
2211 		return;
2212 		break;
2213 	}
2214 	if ((to_ticks <= 0) || (tmr == NULL)) {
2215 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2216 		    __func__, t_type, to_ticks, (void *)tmr);
2217 		return;
2218 	}
2219 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2220 		/*
2221 		 * We do NOT allow you to have it already running. If it is,
2222 		 * we leave the current one up unchanged.
2223 		 */
2224 		return;
2225 	}
2226 	/* At this point we can proceed */
2227 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2228 		stcb->asoc.num_send_timers_up++;
2229 	}
2230 	tmr->stopped_from = 0;
2231 	tmr->type = t_type;
2232 	tmr->ep = (void *)inp;
2233 	tmr->tcb = (void *)stcb;
2234 	tmr->net = (void *)net;
2235 	tmr->self = (void *)tmr;
2236 	tmr->vnet = (void *)curvnet;
2237 	tmr->ticks = sctp_get_tick_count();
2238 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2239 	return;
2240 }
2241 
2242 void
2243 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2244     struct sctp_nets *net, uint32_t from)
2245 {
2246 	struct sctp_timer *tmr;
2247 
2248 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2249 	    (inp == NULL))
2250 		return;
2251 
2252 	tmr = NULL;
2253 	if (stcb) {
2254 		SCTP_TCB_LOCK_ASSERT(stcb);
2255 	}
2256 	switch (t_type) {
2257 	case SCTP_TIMER_TYPE_ZERO_COPY:
2258 		tmr = &inp->sctp_ep.zero_copy_timer;
2259 		break;
2260 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2261 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2262 		break;
2263 	case SCTP_TIMER_TYPE_ADDR_WQ:
2264 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2265 		break;
2266 	case SCTP_TIMER_TYPE_SEND:
2267 		if ((stcb == NULL) || (net == NULL)) {
2268 			return;
2269 		}
2270 		tmr = &net->rxt_timer;
2271 		break;
2272 	case SCTP_TIMER_TYPE_INIT:
2273 		if ((stcb == NULL) || (net == NULL)) {
2274 			return;
2275 		}
2276 		tmr = &net->rxt_timer;
2277 		break;
2278 	case SCTP_TIMER_TYPE_RECV:
2279 		if (stcb == NULL) {
2280 			return;
2281 		}
2282 		tmr = &stcb->asoc.dack_timer;
2283 		break;
2284 	case SCTP_TIMER_TYPE_SHUTDOWN:
2285 		if ((stcb == NULL) || (net == NULL)) {
2286 			return;
2287 		}
2288 		tmr = &net->rxt_timer;
2289 		break;
2290 	case SCTP_TIMER_TYPE_HEARTBEAT:
2291 		if ((stcb == NULL) || (net == NULL)) {
2292 			return;
2293 		}
2294 		tmr = &net->hb_timer;
2295 		break;
2296 	case SCTP_TIMER_TYPE_COOKIE:
2297 		if ((stcb == NULL) || (net == NULL)) {
2298 			return;
2299 		}
2300 		tmr = &net->rxt_timer;
2301 		break;
2302 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2303 		/* nothing needed but the endpoint here */
2304 		tmr = &inp->sctp_ep.signature_change;
2305 		/*
2306 		 * We re-use the newcookie timer for the INP kill timer. We
2307 		 * must ensure that we do not kill it by accident.
2308 		 */
2309 		break;
2310 	case SCTP_TIMER_TYPE_ASOCKILL:
2311 		/*
2312 		 * Stop the asoc kill timer.
2313 		 */
2314 		if (stcb == NULL) {
2315 			return;
2316 		}
2317 		tmr = &stcb->asoc.strreset_timer;
2318 		break;
2319 
2320 	case SCTP_TIMER_TYPE_INPKILL:
2321 		/*
2322 		 * The inp is set up to die. We re-use the signature_change
2323 		 * timer since that has stopped and we are in the GONE
2324 		 * state.
2325 		 */
2326 		tmr = &inp->sctp_ep.signature_change;
2327 		break;
2328 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2329 		if ((stcb == NULL) || (net == NULL)) {
2330 			return;
2331 		}
2332 		tmr = &net->pmtu_timer;
2333 		break;
2334 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2335 		if ((stcb == NULL) || (net == NULL)) {
2336 			return;
2337 		}
2338 		tmr = &net->rxt_timer;
2339 		break;
2340 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2341 		if (stcb == NULL) {
2342 			return;
2343 		}
2344 		tmr = &stcb->asoc.shut_guard_timer;
2345 		break;
2346 	case SCTP_TIMER_TYPE_STRRESET:
2347 		if (stcb == NULL) {
2348 			return;
2349 		}
2350 		tmr = &stcb->asoc.strreset_timer;
2351 		break;
2352 	case SCTP_TIMER_TYPE_ASCONF:
2353 		if (stcb == NULL) {
2354 			return;
2355 		}
2356 		tmr = &stcb->asoc.asconf_timer;
2357 		break;
2358 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2359 		if (stcb == NULL) {
2360 			return;
2361 		}
2362 		tmr = &stcb->asoc.delete_prim_timer;
2363 		break;
2364 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2365 		if (stcb == NULL) {
2366 			return;
2367 		}
2368 		tmr = &stcb->asoc.autoclose_timer;
2369 		break;
2370 	default:
2371 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2372 		    __func__, t_type);
2373 		break;
2374 	}
2375 	if (tmr == NULL) {
2376 		return;
2377 	}
2378 	if ((tmr->type != t_type) && tmr->type) {
2379 		/*
2380 		 * Ok, we have a timer that is under joint use; for example,
2381 		 * the cookie timer shares &net->rxt_timer with the SEND
2382 		 * timer. The timer currently running is therefore NOT the one
2383 		 * the caller wants stopped, so just return.
2384 		 */
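		/*
		 * Another example of joint use in this function:
		 * &stcb->asoc.strreset_timer serves both
		 * SCTP_TIMER_TYPE_STRRESET and SCTP_TIMER_TYPE_ASOCKILL.
		 */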
2385 		return;
2386 	}
2387 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2388 		stcb->asoc.num_send_timers_up--;
2389 		if (stcb->asoc.num_send_timers_up < 0) {
2390 			stcb->asoc.num_send_timers_up = 0;
2391 		}
2392 	}
2393 	tmr->self = NULL;
2394 	tmr->stopped_from = from;
2395 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2396 	return;
2397 }
2398 
2399 uint32_t
2400 sctp_calculate_len(struct mbuf *m)
2401 {
2402 	uint32_t tlen = 0;
2403 	struct mbuf *at;
2404 
2405 	at = m;
2406 	while (at) {
2407 		tlen += SCTP_BUF_LEN(at);
2408 		at = SCTP_BUF_NEXT(at);
2409 	}
2410 	return (tlen);
2411 }
2412 
2413 void
2414 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2415     struct sctp_association *asoc, uint32_t mtu)
2416 {
2417 	/*
2418 	 * Reset the P-MTU size on this association. This involves changing
2419 	 * the asoc MTU and going through ANY chunk+overhead larger than mtu
2420 	 * to allow the DF flag to be cleared.
2421 	 */
2422 	struct sctp_tmit_chunk *chk;
2423 	unsigned int eff_mtu, ovh;
2424 
2425 	asoc->smallest_mtu = mtu;
2426 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2427 		ovh = SCTP_MIN_OVERHEAD;
2428 	} else {
2429 		ovh = SCTP_MIN_V4_OVERHEAD;
2430 	}
2431 	eff_mtu = mtu - ovh;
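	/*
	 * eff_mtu is the largest chunk size that still fits into one
	 * datagram of size 'mtu' once the minimal SCTP/IP overhead has been
	 * subtracted; anything bigger is marked as OK to fragment below.
	 */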
2432 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2433 		if (chk->send_size > eff_mtu) {
2434 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2435 		}
2436 	}
2437 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2438 		if (chk->send_size > eff_mtu) {
2439 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2440 		}
2441 	}
2442 }
2443 
2444 
2445 /*
2446  * Given an association and the starting time of the current RTT period,
2447  * return the RTO in msecs.  'net' should point to the current network.
2448  */
2449 
2450 uint32_t
2451 sctp_calculate_rto(struct sctp_tcb *stcb,
2452     struct sctp_association *asoc,
2453     struct sctp_nets *net,
2454     struct timeval *told,
2455     int safe, int rtt_from_sack)
2456 {
2457 	/*-
2458 	 * Given an association and the starting time of the current RTT
2459 	 * period (in 'told'), return the RTO in number of msecs.
2460 	 */
2461 	int32_t rtt;		/* RTT in ms */
2462 	uint32_t new_rto;
2463 	int first_measure = 0;
2464 	struct timeval now, then, *old;
2465 
2466 	/* Copy it out for sparc64 */
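	/*
	 * (On strict-alignment machines the caller may hand us a potentially
	 * misaligned timeval; sctp_align_unsafe_makecopy asks us to copy it
	 * into the aligned local 'then' before dereferencing it.)
	 */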
2467 	if (safe == sctp_align_unsafe_makecopy) {
2468 		old = &then;
2469 		memcpy(&then, told, sizeof(struct timeval));
2470 	} else if (safe == sctp_align_safe_nocopy) {
2471 		old = told;
2472 	} else {
2473 		/* error */
2474 		SCTP_PRINTF("Huh, bad rto calc call\n");
2475 		return (0);
2476 	}
2477 	/************************/
2478 	/* 1. calculate new RTT */
2479 	/************************/
2480 	/* get the current time */
2481 	if (stcb->asoc.use_precise_time) {
2482 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2483 	} else {
2484 		(void)SCTP_GETTIME_TIMEVAL(&now);
2485 	}
2486 	timevalsub(&now, old);
2487 	/* store the current RTT in us */
2488 	net->rtt = (uint64_t) 1000000 * (uint64_t) now.tv_sec +
2489 	    (uint64_t) now.tv_usec;
2490 
2491 	/* compute rtt in ms */
2492 	rtt = (int32_t) (net->rtt / 1000);
2493 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2494 		/*
2495 		 * Tell the CC module that a new update has just occurred
2496 		 * from a sack
2497 		 */
2498 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2499 	}
2500 	/*
2501 	 * Do we need to determine the LAN type? We do this only on SACKs,
2502 	 * i.e. RTT determined from data, not from non-data (HB/INIT->INITACK).
2503 	 */
2504 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2505 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2506 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2507 			net->lan_type = SCTP_LAN_INTERNET;
2508 		} else {
2509 			net->lan_type = SCTP_LAN_LOCAL;
2510 		}
2511 	}
2512 	/***************************/
2513 	/* 2. update RTTVAR & SRTT */
2514 	/***************************/
2515 	/*-
2516 	 * Compute the scaled average lastsa and the
2517 	 * scaled variance lastsv as described in Van Jacobson's
2518 	 * paper "Congestion Avoidance and Control", Annex A.
2519 	 *
2520 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2521 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2522 	 */
2523 	if (net->RTO_measured) {
2524 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2525 		net->lastsa += rtt;
2526 		if (rtt < 0) {
2527 			rtt = -rtt;
2528 		}
2529 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2530 		net->lastsv += rtt;
2531 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2532 			rto_logging(net, SCTP_LOG_RTTVAR);
2533 		}
2534 	} else {
2535 		/* First RTO measurement */
2536 		net->RTO_measured = 1;
2537 		first_measure = 1;
2538 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2539 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2540 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2541 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2542 		}
2543 	}
2544 	if (net->lastsv == 0) {
2545 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2546 	}
2547 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
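	/*
	 * With the usual shift values (assuming SCTP_RTT_SHIFT == 3 and
	 * SCTP_RTT_VAR_SHIFT == 2) this is the classic
	 * RTO = SRTT + 4 * RTTVAR formula.
	 */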
2548 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2549 	    (stcb->asoc.sat_network_lockout == 0)) {
2550 		stcb->asoc.sat_network = 1;
2551 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2552 		stcb->asoc.sat_network = 0;
2553 		stcb->asoc.sat_network_lockout = 1;
2554 	}
2555 	/* bound it, per C6/C7 in Section 5.3.1 */
2556 	if (new_rto < stcb->asoc.minrto) {
2557 		new_rto = stcb->asoc.minrto;
2558 	}
2559 	if (new_rto > stcb->asoc.maxrto) {
2560 		new_rto = stcb->asoc.maxrto;
2561 	}
2562 	/* we are now returning the RTO */
2563 	return (new_rto);
2564 }
2565 
2566 /*
2567  * return a pointer to a contiguous piece of data from the given mbuf chain
2568  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2569  * one mbuf, a copy is made at 'ptr'.  The caller must ensure that the buffer
2570  * size is >= 'len'.  Returns NULL if there aren't 'len' bytes in the chain.
2571  */
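/*
 * A typical (purely illustrative) use is pulling a chunk header that may be
 * split across mbufs into a local buffer:
 *
 *	struct sctp_chunkhdr ch_store, *ch;
 *
 *	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
 *	    sizeof(struct sctp_chunkhdr), (uint8_t *)&ch_store);
 *	if (ch == NULL)
 *		return;		(fewer than 'len' bytes were in the chain)
 */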
2572 caddr_t
2573 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2574 {
2575 	uint32_t count;
2576 	uint8_t *ptr;
2577 
2578 	ptr = in_ptr;
2579 	if ((off < 0) || (len <= 0))
2580 		return (NULL);
2581 
2582 	/* find the desired start location */
2583 	while ((m != NULL) && (off > 0)) {
2584 		if (off < SCTP_BUF_LEN(m))
2585 			break;
2586 		off -= SCTP_BUF_LEN(m);
2587 		m = SCTP_BUF_NEXT(m);
2588 	}
2589 	if (m == NULL)
2590 		return (NULL);
2591 
2592 	/* is the current mbuf large enough (i.e. contiguous)? */
2593 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2594 		return (mtod(m, caddr_t)+off);
2595 	} else {
2596 		/* else, it spans more than one mbuf, so save a temp copy... */
2597 		while ((m != NULL) && (len > 0)) {
2598 			count = min(SCTP_BUF_LEN(m) - off, len);
2599 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2600 			len -= count;
2601 			ptr += count;
2602 			off = 0;
2603 			m = SCTP_BUF_NEXT(m);
2604 		}
2605 		if ((m == NULL) && (len > 0))
2606 			return (NULL);
2607 		else
2608 			return ((caddr_t)in_ptr);
2609 	}
2610 }
2611 
2612 
2613 
2614 struct sctp_paramhdr *
2615 sctp_get_next_param(struct mbuf *m,
2616     int offset,
2617     struct sctp_paramhdr *pull,
2618     int pull_limit)
2619 {
2620 	/* This just provides a typed signature to Peter's Pull routine */
2621 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2622 	    (uint8_t *) pull));
2623 }
2624 
2625 
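/*
 * Append 'padlen' (at most 3) zero bytes to the end of 'm', growing the
 * chain by one mbuf if there is not enough trailing space.  Returns the mbuf
 * the padding was written to, or NULL on failure.  Illustrative example: a
 * caller holding a 17-byte chunk would pass padlen = 3 to reach the next
 * 4-byte boundary.
 */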
2626 struct mbuf *
2627 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2628 {
2629 	struct mbuf *m_last;
2630 	caddr_t dp;
2631 
2632 	if (padlen > 3) {
2633 		return (NULL);
2634 	}
2635 	if (padlen <= M_TRAILINGSPACE(m)) {
2636 		/*
2637 		 * The easy way. We hope the majority of the time we hit
2638 		 * here :)
2639 		 */
2640 		m_last = m;
2641 	} else {
2642 		/* Hard way we must grow the mbuf chain */
2643 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2644 		if (m_last == NULL) {
2645 			return (NULL);
2646 		}
2647 		SCTP_BUF_LEN(m_last) = 0;
2648 		SCTP_BUF_NEXT(m_last) = NULL;
2649 		SCTP_BUF_NEXT(m) = m_last;
2650 	}
2651 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2652 	SCTP_BUF_LEN(m_last) += padlen;
2653 	memset(dp, 0, padlen);
2654 	return (m_last);
2655 }
2656 
2657 struct mbuf *
2658 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2659 {
2660 	/* find the last mbuf in chain and pad it */
2661 	struct mbuf *m_at;
2662 
2663 	if (last_mbuf != NULL) {
2664 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2665 	} else {
2666 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2667 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2668 				return (sctp_add_pad_tombuf(m_at, padval));
2669 			}
2670 		}
2671 	}
2672 	return (NULL);
2673 }
2674 
2675 static void
2676 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2677     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2678 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2679     SCTP_UNUSED
2680 #endif
2681 )
2682 {
2683 	struct mbuf *m_notify;
2684 	struct sctp_assoc_change *sac;
2685 	struct sctp_queued_to_read *control;
2686 	unsigned int notif_len;
2687 	uint16_t abort_len;
2688 	unsigned int i;
2689 
2690 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2691 	struct socket *so;
2692 
2693 #endif
2694 
2695 	if (stcb == NULL) {
2696 		return;
2697 	}
2698 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2699 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2700 		if (abort != NULL) {
2701 			abort_len = ntohs(abort->ch.chunk_length);
2702 		} else {
2703 			abort_len = 0;
2704 		}
2705 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2706 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2707 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2708 			notif_len += abort_len;
2709 		}
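		/*
		 * notif_len now covers the base structure plus either the
		 * feature list (COMM_UP/RESTART) or the ABORT chunk
		 * (COMM_LOST/CANT_STR_ASSOC).  If that allocation fails, we
		 * retry below with just the base size.
		 */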
2710 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2711 		if (m_notify == NULL) {
2712 			/* Retry with smaller value. */
2713 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2714 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2715 			if (m_notify == NULL) {
2716 				goto set_error;
2717 			}
2718 		}
2719 		SCTP_BUF_NEXT(m_notify) = NULL;
2720 		sac = mtod(m_notify, struct sctp_assoc_change *);
2721 		memset(sac, 0, notif_len);
2722 		sac->sac_type = SCTP_ASSOC_CHANGE;
2723 		sac->sac_flags = 0;
2724 		sac->sac_length = sizeof(struct sctp_assoc_change);
2725 		sac->sac_state = state;
2726 		sac->sac_error = error;
2727 		/* XXX verify these stream counts */
2728 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2729 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2730 		sac->sac_assoc_id = sctp_get_associd(stcb);
2731 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2732 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2733 				i = 0;
2734 				if (stcb->asoc.prsctp_supported == 1) {
2735 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2736 				}
2737 				if (stcb->asoc.auth_supported == 1) {
2738 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2739 				}
2740 				if (stcb->asoc.asconf_supported == 1) {
2741 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2742 				}
2743 				if (stcb->asoc.idata_supported == 1) {
2744 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2745 				}
2746 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2747 				if (stcb->asoc.reconfig_supported == 1) {
2748 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2749 				}
2750 				sac->sac_length += i;
2751 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2752 				memcpy(sac->sac_info, abort, abort_len);
2753 				sac->sac_length += abort_len;
2754 			}
2755 		}
2756 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2757 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2758 		    0, 0, stcb->asoc.context, 0, 0, 0,
2759 		    m_notify);
2760 		if (control != NULL) {
2761 			control->length = SCTP_BUF_LEN(m_notify);
2762 			/* not that we need this */
2763 			control->tail_mbuf = m_notify;
2764 			control->spec_flags = M_NOTIFICATION;
2765 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2766 			    control,
2767 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2768 			    so_locked);
2769 		} else {
2770 			sctp_m_freem(m_notify);
2771 		}
2772 	}
2773 	/*
2774 	 * For 1-to-1 style sockets, we send up an error when an ABORT
2775 	 * comes in.
2776 	 */
2777 set_error:
2778 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2779 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2780 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2781 		SOCK_LOCK(stcb->sctp_socket);
2782 		if (from_peer) {
2783 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2784 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2785 				stcb->sctp_socket->so_error = ECONNREFUSED;
2786 			} else {
2787 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2788 				stcb->sctp_socket->so_error = ECONNRESET;
2789 			}
2790 		} else {
2791 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2792 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2793 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2794 				stcb->sctp_socket->so_error = ETIMEDOUT;
2795 			} else {
2796 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2797 				stcb->sctp_socket->so_error = ECONNABORTED;
2798 			}
2799 		}
2800 	}
2801 	/* Wake ANY sleepers */
2802 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2803 	so = SCTP_INP_SO(stcb->sctp_ep);
2804 	if (!so_locked) {
2805 		atomic_add_int(&stcb->asoc.refcnt, 1);
2806 		SCTP_TCB_UNLOCK(stcb);
2807 		SCTP_SOCKET_LOCK(so, 1);
2808 		SCTP_TCB_LOCK(stcb);
2809 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2810 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2811 			SCTP_SOCKET_UNLOCK(so, 1);
2812 			return;
2813 		}
2814 	}
2815 #endif
2816 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2817 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2818 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2819 		socantrcvmore_locked(stcb->sctp_socket);
2820 	}
2821 	sorwakeup(stcb->sctp_socket);
2822 	sowwakeup(stcb->sctp_socket);
2823 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2824 	if (!so_locked) {
2825 		SCTP_SOCKET_UNLOCK(so, 1);
2826 	}
2827 #endif
2828 }
2829 
2830 static void
2831 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2832     struct sockaddr *sa, uint32_t error, int so_locked
2833 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2834     SCTP_UNUSED
2835 #endif
2836 )
2837 {
2838 	struct mbuf *m_notify;
2839 	struct sctp_paddr_change *spc;
2840 	struct sctp_queued_to_read *control;
2841 
2842 	if ((stcb == NULL) ||
2843 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2844 		/* event not enabled */
2845 		return;
2846 	}
2847 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2848 	if (m_notify == NULL)
2849 		return;
2850 	SCTP_BUF_LEN(m_notify) = 0;
2851 	spc = mtod(m_notify, struct sctp_paddr_change *);
2852 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2853 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2854 	spc->spc_flags = 0;
2855 	spc->spc_length = sizeof(struct sctp_paddr_change);
2856 	switch (sa->sa_family) {
2857 #ifdef INET
2858 	case AF_INET:
2859 #ifdef INET6
2860 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2861 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2862 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2863 		} else {
2864 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2865 		}
2866 #else
2867 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2868 #endif
2869 		break;
2870 #endif
2871 #ifdef INET6
2872 	case AF_INET6:
2873 		{
2874 			struct sockaddr_in6 *sin6;
2875 
2876 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2877 
2878 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2879 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2880 				if (sin6->sin6_scope_id == 0) {
2881 					/* recover scope_id for user */
2882 					(void)sa6_recoverscope(sin6);
2883 				} else {
2884 					/* clear embedded scope_id for user */
2885 					in6_clearscope(&sin6->sin6_addr);
2886 				}
2887 			}
2888 			break;
2889 		}
2890 #endif
2891 	default:
2892 		/* TSNH */
2893 		break;
2894 	}
2895 	spc->spc_state = state;
2896 	spc->spc_error = error;
2897 	spc->spc_assoc_id = sctp_get_associd(stcb);
2898 
2899 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2900 	SCTP_BUF_NEXT(m_notify) = NULL;
2901 
2902 	/* append to socket */
2903 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2904 	    0, 0, stcb->asoc.context, 0, 0, 0,
2905 	    m_notify);
2906 	if (control == NULL) {
2907 		/* no memory */
2908 		sctp_m_freem(m_notify);
2909 		return;
2910 	}
2911 	control->length = SCTP_BUF_LEN(m_notify);
2912 	control->spec_flags = M_NOTIFICATION;
2913 	/* not that we need this */
2914 	control->tail_mbuf = m_notify;
2915 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2916 	    control,
2917 	    &stcb->sctp_socket->so_rcv, 1,
2918 	    SCTP_READ_LOCK_NOT_HELD,
2919 	    so_locked);
2920 }
2921 
2922 
2923 static void
2924 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2925     struct sctp_tmit_chunk *chk, int so_locked
2926 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2927     SCTP_UNUSED
2928 #endif
2929 )
2930 {
2931 	struct mbuf *m_notify;
2932 	struct sctp_send_failed *ssf;
2933 	struct sctp_send_failed_event *ssfe;
2934 	struct sctp_queued_to_read *control;
2935 	struct sctp_chunkhdr *chkhdr;
2936 	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
2937 
2938 	if ((stcb == NULL) ||
2939 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2940 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2941 		/* event not enabled */
2942 		return;
2943 	}
2944 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2945 		notifhdr_len = sizeof(struct sctp_send_failed_event);
2946 	} else {
2947 		notifhdr_len = sizeof(struct sctp_send_failed);
2948 	}
2949 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
2950 	if (m_notify == NULL)
2951 		/* no space left */
2952 		return;
2953 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
2954 	if (stcb->asoc.idata_supported) {
2955 		chkhdr_len = sizeof(struct sctp_idata_chunk);
2956 	} else {
2957 		chkhdr_len = sizeof(struct sctp_data_chunk);
2958 	}
2959 	/* Use some defaults in case we can't access the chunk header */
2960 	if (chk->send_size >= chkhdr_len) {
2961 		payload_len = chk->send_size - chkhdr_len;
2962 	} else {
2963 		payload_len = 0;
2964 	}
2965 	padding_len = 0;
2966 	if (chk->data != NULL) {
2967 		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
2968 		if (chkhdr != NULL) {
2969 			chk_len = ntohs(chkhdr->chunk_length);
2970 			if ((chk_len >= chkhdr_len) &&
2971 			    (chk->send_size >= chk_len) &&
2972 			    (chk->send_size - chk_len < 4)) {
2973 				padding_len = chk->send_size - chk_len;
2974 				payload_len = chk->send_size - chkhdr_len - padding_len;
2975 			}
2976 		}
2977 	}
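	/*
	 * At this point, if the chunk header was readable, chk_len is the
	 * on-the-wire chunk length, padding_len is the 0-3 bytes by which
	 * send_size exceeds it, and payload_len is what remains once the
	 * chunk header and padding are removed.
	 */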
2978 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2979 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2980 		memset(ssfe, 0, notifhdr_len);
2981 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2982 		if (sent) {
2983 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2984 		} else {
2985 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2986 		}
2987 		ssfe->ssfe_length = (uint32_t) (notifhdr_len + payload_len);
2988 		ssfe->ssfe_error = error;
2989 		/* not exactly what the user sent in, but should be close :) */
2990 		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
2991 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2992 		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
2993 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2994 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2995 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2996 	} else {
2997 		ssf = mtod(m_notify, struct sctp_send_failed *);
2998 		memset(ssf, 0, notifhdr_len);
2999 		ssf->ssf_type = SCTP_SEND_FAILED;
3000 		if (sent) {
3001 			ssf->ssf_flags = SCTP_DATA_SENT;
3002 		} else {
3003 			ssf->ssf_flags = SCTP_DATA_UNSENT;
3004 		}
3005 		ssf->ssf_length = (uint32_t) (notifhdr_len + payload_len);
3006 		ssf->ssf_error = error;
3007 		/* not exactly what the user sent in, but should be close :) */
3008 		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
3009 		ssf->ssf_info.sinfo_ssn = (uint16_t) chk->rec.data.stream_seq;
3010 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3011 		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
3012 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
3013 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3014 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3015 	}
3016 	if (chk->data != NULL) {
3017 		/* Trim off the sctp chunk header (it should be there) */
3018 		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
3019 			m_adj(chk->data, chkhdr_len);
3020 			m_adj(chk->data, -padding_len);
3021 			sctp_mbuf_crush(chk->data);
3022 			chk->send_size -= (chkhdr_len + padding_len);
3023 		}
3024 	}
3025 	SCTP_BUF_NEXT(m_notify) = chk->data;
3026 	/* Steal off the mbuf */
3027 	chk->data = NULL;
3028 	/*
3029 	 * For this case, we check the actual socket buffer: since the assoc
3030 	 * is going away, we don't want to overfill the socket buffer for a
3031 	 * non-reader.
3032 	 */
3033 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3034 		sctp_m_freem(m_notify);
3035 		return;
3036 	}
3037 	/* append to socket */
3038 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3039 	    0, 0, stcb->asoc.context, 0, 0, 0,
3040 	    m_notify);
3041 	if (control == NULL) {
3042 		/* no memory */
3043 		sctp_m_freem(m_notify);
3044 		return;
3045 	}
3046 	control->spec_flags = M_NOTIFICATION;
3047 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3048 	    control,
3049 	    &stcb->sctp_socket->so_rcv, 1,
3050 	    SCTP_READ_LOCK_NOT_HELD,
3051 	    so_locked);
3052 }
3053 
3054 
3055 static void
3056 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3057     struct sctp_stream_queue_pending *sp, int so_locked
3058 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3059     SCTP_UNUSED
3060 #endif
3061 )
3062 {
3063 	struct mbuf *m_notify;
3064 	struct sctp_send_failed *ssf;
3065 	struct sctp_send_failed_event *ssfe;
3066 	struct sctp_queued_to_read *control;
3067 	int notifhdr_len;
3068 
3069 	if ((stcb == NULL) ||
3070 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3071 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3072 		/* event not enabled */
3073 		return;
3074 	}
3075 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3076 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3077 	} else {
3078 		notifhdr_len = sizeof(struct sctp_send_failed);
3079 	}
3080 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3081 	if (m_notify == NULL) {
3082 		/* no space left */
3083 		return;
3084 	}
3085 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3086 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3087 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3088 		memset(ssfe, 0, notifhdr_len);
3089 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3090 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3091 		ssfe->ssfe_length = (uint32_t) (notifhdr_len + sp->length);
3092 		ssfe->ssfe_error = error;
3093 		/* not exactly what the user sent in, but should be close :) */
3094 		ssfe->ssfe_info.snd_sid = sp->stream;
3095 		if (sp->some_taken) {
3096 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3097 		} else {
3098 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3099 		}
3100 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3101 		ssfe->ssfe_info.snd_context = sp->context;
3102 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3103 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3104 	} else {
3105 		ssf = mtod(m_notify, struct sctp_send_failed *);
3106 		memset(ssf, 0, notifhdr_len);
3107 		ssf->ssf_type = SCTP_SEND_FAILED;
3108 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3109 		ssf->ssf_length = (uint32_t) (notifhdr_len + sp->length);
3110 		ssf->ssf_error = error;
3111 		/* not exactly what the user sent in, but should be close :) */
3112 		ssf->ssf_info.sinfo_stream = sp->stream;
3113 		ssf->ssf_info.sinfo_ssn = 0;
3114 		if (sp->some_taken) {
3115 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3116 		} else {
3117 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3118 		}
3119 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3120 		ssf->ssf_info.sinfo_context = sp->context;
3121 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3122 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3123 	}
3124 	SCTP_BUF_NEXT(m_notify) = sp->data;
3125 
3126 	/* Steal off the mbuf */
3127 	sp->data = NULL;
3128 	/*
3129 	 * For this case, we check the actual socket buffer: since the assoc
3130 	 * is going away, we don't want to overfill the socket buffer for a
3131 	 * non-reader.
3132 	 */
3133 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3134 		sctp_m_freem(m_notify);
3135 		return;
3136 	}
3137 	/* append to socket */
3138 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3139 	    0, 0, stcb->asoc.context, 0, 0, 0,
3140 	    m_notify);
3141 	if (control == NULL) {
3142 		/* no memory */
3143 		sctp_m_freem(m_notify);
3144 		return;
3145 	}
3146 	control->spec_flags = M_NOTIFICATION;
3147 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3148 	    control,
3149 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3150 }
3151 
3152 
3153 
3154 static void
3155 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3156 {
3157 	struct mbuf *m_notify;
3158 	struct sctp_adaptation_event *sai;
3159 	struct sctp_queued_to_read *control;
3160 
3161 	if ((stcb == NULL) ||
3162 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3163 		/* event not enabled */
3164 		return;
3165 	}
3166 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3167 	if (m_notify == NULL)
3168 		/* no space left */
3169 		return;
3170 	SCTP_BUF_LEN(m_notify) = 0;
3171 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3172 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3173 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3174 	sai->sai_flags = 0;
3175 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3176 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3177 	sai->sai_assoc_id = sctp_get_associd(stcb);
3178 
3179 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3180 	SCTP_BUF_NEXT(m_notify) = NULL;
3181 
3182 	/* append to socket */
3183 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3184 	    0, 0, stcb->asoc.context, 0, 0, 0,
3185 	    m_notify);
3186 	if (control == NULL) {
3187 		/* no memory */
3188 		sctp_m_freem(m_notify);
3189 		return;
3190 	}
3191 	control->length = SCTP_BUF_LEN(m_notify);
3192 	control->spec_flags = M_NOTIFICATION;
3193 	/* not that we need this */
3194 	control->tail_mbuf = m_notify;
3195 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3196 	    control,
3197 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3198 }
3199 
3200 /* This must always be called with the read-queue LOCKED in the INP */
3201 static void
3202 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3203     uint32_t val, int so_locked
3204 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3205     SCTP_UNUSED
3206 #endif
3207 )
3208 {
3209 	struct mbuf *m_notify;
3210 	struct sctp_pdapi_event *pdapi;
3211 	struct sctp_queued_to_read *control;
3212 	struct sockbuf *sb;
3213 
3214 	if ((stcb == NULL) ||
3215 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3216 		/* event not enabled */
3217 		return;
3218 	}
3219 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3220 		return;
3221 	}
3222 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3223 	if (m_notify == NULL)
3224 		/* no space left */
3225 		return;
3226 	SCTP_BUF_LEN(m_notify) = 0;
3227 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3228 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3229 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3230 	pdapi->pdapi_flags = 0;
3231 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3232 	pdapi->pdapi_indication = error;
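	/*
	 * 'val' packs the stream id in its upper 16 bits and the stream
	 * sequence number in its lower 16 bits.
	 */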
3233 	pdapi->pdapi_stream = (val >> 16);
3234 	pdapi->pdapi_seq = (val & 0x0000ffff);
3235 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3236 
3237 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3238 	SCTP_BUF_NEXT(m_notify) = NULL;
3239 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3240 	    0, 0, stcb->asoc.context, 0, 0, 0,
3241 	    m_notify);
3242 	if (control == NULL) {
3243 		/* no memory */
3244 		sctp_m_freem(m_notify);
3245 		return;
3246 	}
3247 	control->spec_flags = M_NOTIFICATION;
3248 	control->length = SCTP_BUF_LEN(m_notify);
3249 	/* not that we need this */
3250 	control->tail_mbuf = m_notify;
3251 	control->held_length = 0;
3252 	control->length = 0;
3253 	sb = &stcb->sctp_socket->so_rcv;
3254 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3255 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3256 	}
3257 	sctp_sballoc(stcb, sb, m_notify);
3258 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3259 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3260 	}
3261 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3262 	control->end_added = 1;
3263 	if (stcb->asoc.control_pdapi)
3264 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3265 	else {
3266 		/* we really should not see this case */
3267 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3268 	}
3269 	if (stcb->sctp_ep && stcb->sctp_socket) {
3270 		/* This should always be the case */
3271 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3272 		struct socket *so;
3273 
3274 		so = SCTP_INP_SO(stcb->sctp_ep);
3275 		if (!so_locked) {
3276 			atomic_add_int(&stcb->asoc.refcnt, 1);
3277 			SCTP_TCB_UNLOCK(stcb);
3278 			SCTP_SOCKET_LOCK(so, 1);
3279 			SCTP_TCB_LOCK(stcb);
3280 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3281 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3282 				SCTP_SOCKET_UNLOCK(so, 1);
3283 				return;
3284 			}
3285 		}
3286 #endif
3287 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3288 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3289 		if (!so_locked) {
3290 			SCTP_SOCKET_UNLOCK(so, 1);
3291 		}
3292 #endif
3293 	}
3294 }
3295 
3296 static void
3297 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3298 {
3299 	struct mbuf *m_notify;
3300 	struct sctp_shutdown_event *sse;
3301 	struct sctp_queued_to_read *control;
3302 
3303 	/*
3304 	 * For TCP model AND UDP connected sockets we will send an error up
3305 	 * when a SHUTDOWN completes.
3306 	 */
3307 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3308 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3309 		/* mark socket closed for read/write and wakeup! */
3310 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3311 		struct socket *so;
3312 
3313 		so = SCTP_INP_SO(stcb->sctp_ep);
3314 		atomic_add_int(&stcb->asoc.refcnt, 1);
3315 		SCTP_TCB_UNLOCK(stcb);
3316 		SCTP_SOCKET_LOCK(so, 1);
3317 		SCTP_TCB_LOCK(stcb);
3318 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3319 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3320 			SCTP_SOCKET_UNLOCK(so, 1);
3321 			return;
3322 		}
3323 #endif
3324 		socantsendmore(stcb->sctp_socket);
3325 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3326 		SCTP_SOCKET_UNLOCK(so, 1);
3327 #endif
3328 	}
3329 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3330 		/* event not enabled */
3331 		return;
3332 	}
3333 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3334 	if (m_notify == NULL)
3335 		/* no space left */
3336 		return;
3337 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3338 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3339 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3340 	sse->sse_flags = 0;
3341 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3342 	sse->sse_assoc_id = sctp_get_associd(stcb);
3343 
3344 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3345 	SCTP_BUF_NEXT(m_notify) = NULL;
3346 
3347 	/* append to socket */
3348 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3349 	    0, 0, stcb->asoc.context, 0, 0, 0,
3350 	    m_notify);
3351 	if (control == NULL) {
3352 		/* no memory */
3353 		sctp_m_freem(m_notify);
3354 		return;
3355 	}
3356 	control->spec_flags = M_NOTIFICATION;
3357 	control->length = SCTP_BUF_LEN(m_notify);
3358 	/* not that we need this */
3359 	control->tail_mbuf = m_notify;
3360 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3361 	    control,
3362 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3363 }
3364 
3365 static void
3366 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3367     int so_locked
3368 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3369     SCTP_UNUSED
3370 #endif
3371 )
3372 {
3373 	struct mbuf *m_notify;
3374 	struct sctp_sender_dry_event *event;
3375 	struct sctp_queued_to_read *control;
3376 
3377 	if ((stcb == NULL) ||
3378 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3379 		/* event not enabled */
3380 		return;
3381 	}
3382 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3383 	if (m_notify == NULL) {
3384 		/* no space left */
3385 		return;
3386 	}
3387 	SCTP_BUF_LEN(m_notify) = 0;
3388 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3389 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3390 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3391 	event->sender_dry_flags = 0;
3392 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3393 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3394 
3395 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3396 	SCTP_BUF_NEXT(m_notify) = NULL;
3397 
3398 	/* append to socket */
3399 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3400 	    0, 0, stcb->asoc.context, 0, 0, 0,
3401 	    m_notify);
3402 	if (control == NULL) {
3403 		/* no memory */
3404 		sctp_m_freem(m_notify);
3405 		return;
3406 	}
3407 	control->length = SCTP_BUF_LEN(m_notify);
3408 	control->spec_flags = M_NOTIFICATION;
3409 	/* not that we need this */
3410 	control->tail_mbuf = m_notify;
3411 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3412 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3413 }
3414 
3415 
3416 void
3417 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3418 {
3419 	struct mbuf *m_notify;
3420 	struct sctp_queued_to_read *control;
3421 	struct sctp_stream_change_event *stradd;
3422 
3423 	if ((stcb == NULL) ||
3424 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3425 		/* event not enabled */
3426 		return;
3427 	}
3428 	if ((stcb->asoc.peer_req_out) && flag) {
3429 		/* Peer made the request, don't tell the local user */
3430 		stcb->asoc.peer_req_out = 0;
3431 		return;
3432 	}
3433 	stcb->asoc.peer_req_out = 0;
3434 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3435 	if (m_notify == NULL)
3436 		/* no space left */
3437 		return;
3438 	SCTP_BUF_LEN(m_notify) = 0;
3439 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3440 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3441 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3442 	stradd->strchange_flags = flag;
3443 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3444 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3445 	stradd->strchange_instrms = numberin;
3446 	stradd->strchange_outstrms = numberout;
3447 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3448 	SCTP_BUF_NEXT(m_notify) = NULL;
3449 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3450 		/* no space */
3451 		sctp_m_freem(m_notify);
3452 		return;
3453 	}
3454 	/* append to socket */
3455 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3456 	    0, 0, stcb->asoc.context, 0, 0, 0,
3457 	    m_notify);
3458 	if (control == NULL) {
3459 		/* no memory */
3460 		sctp_m_freem(m_notify);
3461 		return;
3462 	}
3463 	control->spec_flags = M_NOTIFICATION;
3464 	control->length = SCTP_BUF_LEN(m_notify);
3465 	/* not that we need this */
3466 	control->tail_mbuf = m_notify;
3467 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3468 	    control,
3469 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3470 }
3471 
3472 void
3473 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3474 {
3475 	struct mbuf *m_notify;
3476 	struct sctp_queued_to_read *control;
3477 	struct sctp_assoc_reset_event *strasoc;
3478 
3479 	if ((stcb == NULL) ||
3480 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3481 		/* event not enabled */
3482 		return;
3483 	}
3484 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
3488 	SCTP_BUF_LEN(m_notify) = 0;
3489 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3490 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3491 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3492 	strasoc->assocreset_flags = flag;
3493 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3494 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3495 	strasoc->assocreset_local_tsn = sending_tsn;
3496 	strasoc->assocreset_remote_tsn = recv_tsn;
3497 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3498 	SCTP_BUF_NEXT(m_notify) = NULL;
3499 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3500 		/* no space */
3501 		sctp_m_freem(m_notify);
3502 		return;
3503 	}
3504 	/* append to socket */
3505 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3506 	    0, 0, stcb->asoc.context, 0, 0, 0,
3507 	    m_notify);
3508 	if (control == NULL) {
3509 		/* no memory */
3510 		sctp_m_freem(m_notify);
3511 		return;
3512 	}
3513 	control->spec_flags = M_NOTIFICATION;
3514 	control->length = SCTP_BUF_LEN(m_notify);
3515 	/* not that we need this */
3516 	control->tail_mbuf = m_notify;
3517 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3518 	    control,
3519 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3520 }
3521 
3522 
3523 
3524 static void
3525 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3526     int number_entries, uint16_t * list, int flag)
3527 {
3528 	struct mbuf *m_notify;
3529 	struct sctp_queued_to_read *control;
3530 	struct sctp_stream_reset_event *strreset;
3531 	int len;
3532 
3533 	if ((stcb == NULL) ||
3534 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3535 		/* event not enabled */
3536 		return;
3537 	}
3538 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
3542 	SCTP_BUF_LEN(m_notify) = 0;
3543 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3544 	if (len > M_TRAILINGSPACE(m_notify)) {
3545 		/* never enough room */
3546 		sctp_m_freem(m_notify);
3547 		return;
3548 	}
3549 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3550 	memset(strreset, 0, len);
3551 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3552 	strreset->strreset_flags = flag;
3553 	strreset->strreset_length = len;
3554 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3555 	if (number_entries) {
3556 		int i;
3557 
3558 		for (i = 0; i < number_entries; i++) {
3559 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3560 		}
3561 	}
3562 	SCTP_BUF_LEN(m_notify) = len;
3563 	SCTP_BUF_NEXT(m_notify) = NULL;
3564 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3565 		/* no space */
3566 		sctp_m_freem(m_notify);
3567 		return;
3568 	}
3569 	/* append to socket */
3570 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3571 	    0, 0, stcb->asoc.context, 0, 0, 0,
3572 	    m_notify);
3573 	if (control == NULL) {
3574 		/* no memory */
3575 		sctp_m_freem(m_notify);
3576 		return;
3577 	}
3578 	control->spec_flags = M_NOTIFICATION;
3579 	control->length = SCTP_BUF_LEN(m_notify);
3580 	/* not that we need this */
3581 	control->tail_mbuf = m_notify;
3582 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3583 	    control,
3584 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3585 }
3586 
3587 
3588 static void
3589 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3590 {
3591 	struct mbuf *m_notify;
3592 	struct sctp_remote_error *sre;
3593 	struct sctp_queued_to_read *control;
3594 	unsigned int notif_len;
3595 	uint16_t chunk_len;
3596 
3597 	if ((stcb == NULL) ||
3598 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3599 		return;
3600 	}
3601 	if (chunk != NULL) {
3602 		chunk_len = ntohs(chunk->ch.chunk_length);
3603 	} else {
3604 		chunk_len = 0;
3605 	}
3606 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3607 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3608 	if (m_notify == NULL) {
3609 		/* Retry with smaller value. */
3610 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3611 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3612 		if (m_notify == NULL) {
3613 			return;
3614 		}
3615 	}
3616 	SCTP_BUF_NEXT(m_notify) = NULL;
3617 	sre = mtod(m_notify, struct sctp_remote_error *);
3618 	memset(sre, 0, notif_len);
3619 	sre->sre_type = SCTP_REMOTE_ERROR;
3620 	sre->sre_flags = 0;
3621 	sre->sre_length = sizeof(struct sctp_remote_error);
3622 	sre->sre_error = error;
3623 	sre->sre_assoc_id = sctp_get_associd(stcb);
3624 	if (notif_len > sizeof(struct sctp_remote_error)) {
3625 		memcpy(sre->sre_data, chunk, chunk_len);
3626 		sre->sre_length += chunk_len;
3627 	}
3628 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3629 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3630 	    0, 0, stcb->asoc.context, 0, 0, 0,
3631 	    m_notify);
3632 	if (control != NULL) {
3633 		control->length = SCTP_BUF_LEN(m_notify);
3634 		/* not that we need this */
3635 		control->tail_mbuf = m_notify;
3636 		control->spec_flags = M_NOTIFICATION;
3637 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3638 		    control,
3639 		    &stcb->sctp_socket->so_rcv, 1,
3640 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3641 	} else {
3642 		sctp_m_freem(m_notify);
3643 	}
3644 }
3645 
3646 
3647 void
3648 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3649     uint32_t error, void *data, int so_locked
3650 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3651     SCTP_UNUSED
3652 #endif
3653 )
3654 {
3655 	if ((stcb == NULL) ||
3656 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3657 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3658 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3659 		/* If the socket is gone we are out of here */
3660 		return;
3661 	}
3662 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3663 		return;
3664 	}
3665 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3666 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3667 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3668 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3669 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3670 			/* Don't report these in front states */
3671 			return;
3672 		}
3673 	}
3674 	switch (notification) {
3675 	case SCTP_NOTIFY_ASSOC_UP:
3676 		if (stcb->asoc.assoc_up_sent == 0) {
3677 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3678 			stcb->asoc.assoc_up_sent = 1;
3679 		}
3680 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3681 			sctp_notify_adaptation_layer(stcb);
3682 		}
3683 		if (stcb->asoc.auth_supported == 0) {
3684 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3685 			    NULL, so_locked);
3686 		}
3687 		break;
3688 	case SCTP_NOTIFY_ASSOC_DOWN:
3689 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3690 		break;
3691 	case SCTP_NOTIFY_INTERFACE_DOWN:
3692 		{
3693 			struct sctp_nets *net;
3694 
3695 			net = (struct sctp_nets *)data;
3696 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3697 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3698 			break;
3699 		}
3700 	case SCTP_NOTIFY_INTERFACE_UP:
3701 		{
3702 			struct sctp_nets *net;
3703 
3704 			net = (struct sctp_nets *)data;
3705 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3706 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3707 			break;
3708 		}
3709 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3710 		{
3711 			struct sctp_nets *net;
3712 
3713 			net = (struct sctp_nets *)data;
3714 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3715 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3716 			break;
3717 		}
3718 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3719 		sctp_notify_send_failed2(stcb, error,
3720 		    (struct sctp_stream_queue_pending *)data, so_locked);
3721 		break;
3722 	case SCTP_NOTIFY_SENT_DG_FAIL:
3723 		sctp_notify_send_failed(stcb, 1, error,
3724 		    (struct sctp_tmit_chunk *)data, so_locked);
3725 		break;
3726 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3727 		sctp_notify_send_failed(stcb, 0, error,
3728 		    (struct sctp_tmit_chunk *)data, so_locked);
3729 		break;
3730 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3731 		{
3732 			uint32_t val;
3733 
3734 			val = *((uint32_t *) data);
3735 
3736 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3737 			break;
3738 		}
3739 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3740 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3741 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3742 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3743 		} else {
3744 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3745 		}
3746 		break;
3747 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3748 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3749 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3750 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3751 		} else {
3752 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3753 		}
3754 		break;
3755 	case SCTP_NOTIFY_ASSOC_RESTART:
3756 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3757 		if (stcb->asoc.auth_supported == 0) {
3758 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3759 			    NULL, so_locked);
3760 		}
3761 		break;
3762 	case SCTP_NOTIFY_STR_RESET_SEND:
3763 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3764 		break;
3765 	case SCTP_NOTIFY_STR_RESET_RECV:
3766 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3767 		break;
3768 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3769 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3770 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3771 		break;
3772 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3773 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3774 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3775 		break;
3776 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3777 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3778 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3779 		break;
3780 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3781 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3782 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3783 		break;
3784 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3785 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3786 		    error, so_locked);
3787 		break;
3788 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3789 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3790 		    error, so_locked);
3791 		break;
3792 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3793 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3794 		    error, so_locked);
3795 		break;
3796 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3797 		sctp_notify_shutdown_event(stcb);
3798 		break;
3799 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3800 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3801 		    (uint16_t) (uintptr_t) data,
3802 		    so_locked);
3803 		break;
3804 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3805 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3806 		    (uint16_t) (uintptr_t) data,
3807 		    so_locked);
3808 		break;
3809 	case SCTP_NOTIFY_NO_PEER_AUTH:
3810 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3811 		    (uint16_t) (uintptr_t) data,
3812 		    so_locked);
3813 		break;
3814 	case SCTP_NOTIFY_SENDER_DRY:
3815 		sctp_notify_sender_dry_event(stcb, so_locked);
3816 		break;
3817 	case SCTP_NOTIFY_REMOTE_ERROR:
3818 		sctp_notify_remote_error(stcb, error, data);
3819 		break;
3820 	default:
3821 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3822 		    __func__, notification, notification);
3823 		break;
3824 	}			/* end switch */
3825 }
3826 
3827 void
3828 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3829 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3830     SCTP_UNUSED
3831 #endif
3832 )
3833 {
3834 	struct sctp_association *asoc;
3835 	struct sctp_stream_out *outs;
3836 	struct sctp_tmit_chunk *chk, *nchk;
3837 	struct sctp_stream_queue_pending *sp, *nsp;
3838 	int i;
3839 
3840 	if (stcb == NULL) {
3841 		return;
3842 	}
3843 	asoc = &stcb->asoc;
3844 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3845 		/* already being freed */
3846 		return;
3847 	}
3848 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3849 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3850 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3851 		return;
3852 	}
	/* now go through all the gunk, freeing chunks */
3854 	if (holds_lock == 0) {
3855 		SCTP_TCB_SEND_LOCK(stcb);
3856 	}
3857 	/* sent queue SHOULD be empty */
3858 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3859 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3860 		asoc->sent_queue_cnt--;
3861 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3862 			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3863 				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3864 #ifdef INVARIANTS
3865 			} else {
3866 				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3867 #endif
3868 			}
3869 		}
3870 		if (chk->data != NULL) {
3871 			sctp_free_bufspace(stcb, asoc, chk, 1);
3872 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3873 			    error, chk, so_locked);
3874 			if (chk->data) {
3875 				sctp_m_freem(chk->data);
3876 				chk->data = NULL;
3877 			}
3878 		}
3879 		sctp_free_a_chunk(stcb, chk, so_locked);
3880 		/* sa_ignore FREED_MEMORY */
3881 	}
3882 	/* pending send queue SHOULD be empty */
3883 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3884 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3885 		asoc->send_queue_cnt--;
3886 		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3887 			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3888 #ifdef INVARIANTS
3889 		} else {
3890 			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3891 #endif
3892 		}
3893 		if (chk->data != NULL) {
3894 			sctp_free_bufspace(stcb, asoc, chk, 1);
3895 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3896 			    error, chk, so_locked);
3897 			if (chk->data) {
3898 				sctp_m_freem(chk->data);
3899 				chk->data = NULL;
3900 			}
3901 		}
3902 		sctp_free_a_chunk(stcb, chk, so_locked);
3903 		/* sa_ignore FREED_MEMORY */
3904 	}
3905 	for (i = 0; i < asoc->streamoutcnt; i++) {
3906 		/* For each stream */
3907 		outs = &asoc->strmout[i];
3908 		/* clean up any sends there */
3909 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3910 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
3911 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3912 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
3913 			sctp_free_spbufspace(stcb, asoc, sp);
3914 			if (sp->data) {
3915 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3916 				    error, (void *)sp, so_locked);
3917 				if (sp->data) {
3918 					sctp_m_freem(sp->data);
3919 					sp->data = NULL;
3920 					sp->tail_mbuf = NULL;
3921 					sp->length = 0;
3922 				}
3923 			}
3924 			if (sp->net) {
3925 				sctp_free_remote_addr(sp->net);
3926 				sp->net = NULL;
3927 			}
3928 			/* Free the chunk */
3929 			sctp_free_a_strmoq(stcb, sp, so_locked);
3930 			/* sa_ignore FREED_MEMORY */
3931 		}
3932 	}
3933 
3934 	if (holds_lock == 0) {
3935 		SCTP_TCB_SEND_UNLOCK(stcb);
3936 	}
3937 }
3938 
3939 void
3940 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3941     struct sctp_abort_chunk *abort, int so_locked
3942 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3943     SCTP_UNUSED
3944 #endif
3945 )
3946 {
3947 	if (stcb == NULL) {
3948 		return;
3949 	}
3950 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3951 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3952 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3953 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3954 	}
3955 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3956 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3957 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3958 		return;
3959 	}
3960 	/* Tell them we lost the asoc */
3961 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3962 	if (from_peer) {
3963 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3964 	} else {
3965 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3966 	}
3967 }
3968 
3969 void
3970 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3971     struct mbuf *m, int iphlen,
3972     struct sockaddr *src, struct sockaddr *dst,
3973     struct sctphdr *sh, struct mbuf *op_err,
3974     uint8_t mflowtype, uint32_t mflowid,
3975     uint32_t vrf_id, uint16_t port)
3976 {
3977 	uint32_t vtag;
3978 
3979 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3980 	struct socket *so;
3981 
3982 #endif
3983 
3984 	vtag = 0;
3985 	if (stcb != NULL) {
3986 		vtag = stcb->asoc.peer_vtag;
3987 		vrf_id = stcb->asoc.vrf_id;
3988 	}
3989 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
3990 	    mflowtype, mflowid, inp->fibnum,
3991 	    vrf_id, port);
3992 	if (stcb != NULL) {
3993 		/* We have a TCB to abort, send notification too */
3994 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
3995 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3996 		/* Ok, now lets free it */
3997 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3998 		so = SCTP_INP_SO(inp);
3999 		atomic_add_int(&stcb->asoc.refcnt, 1);
4000 		SCTP_TCB_UNLOCK(stcb);
4001 		SCTP_SOCKET_LOCK(so, 1);
4002 		SCTP_TCB_LOCK(stcb);
4003 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4004 #endif
4005 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4006 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4007 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4008 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4009 		}
4010 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4011 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
4012 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4013 		SCTP_SOCKET_UNLOCK(so, 1);
4014 #endif
4015 	}
4016 }
4017 
4018 #ifdef SCTP_ASOCLOG_OF_TSNS
4019 void
4020 sctp_print_out_track_log(struct sctp_tcb *stcb)
4021 {
4022 #ifdef NOSIY_PRINTS
4023 	int i;
4024 
4025 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4026 	SCTP_PRINTF("IN bound TSN log-aaa\n");
4027 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4028 		SCTP_PRINTF("None rcvd\n");
4029 		goto none_in;
4030 	}
4031 	if (stcb->asoc.tsn_in_wrapped) {
4032 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4033 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4034 			    stcb->asoc.in_tsnlog[i].tsn,
4035 			    stcb->asoc.in_tsnlog[i].strm,
4036 			    stcb->asoc.in_tsnlog[i].seq,
4037 			    stcb->asoc.in_tsnlog[i].flgs,
4038 			    stcb->asoc.in_tsnlog[i].sz);
4039 		}
4040 	}
4041 	if (stcb->asoc.tsn_in_at) {
4042 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4043 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4044 			    stcb->asoc.in_tsnlog[i].tsn,
4045 			    stcb->asoc.in_tsnlog[i].strm,
4046 			    stcb->asoc.in_tsnlog[i].seq,
4047 			    stcb->asoc.in_tsnlog[i].flgs,
4048 			    stcb->asoc.in_tsnlog[i].sz);
4049 		}
4050 	}
4051 none_in:
4052 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4053 	if ((stcb->asoc.tsn_out_at == 0) &&
4054 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4055 		SCTP_PRINTF("None sent\n");
4056 	}
4057 	if (stcb->asoc.tsn_out_wrapped) {
4058 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4059 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4060 			    stcb->asoc.out_tsnlog[i].tsn,
4061 			    stcb->asoc.out_tsnlog[i].strm,
4062 			    stcb->asoc.out_tsnlog[i].seq,
4063 			    stcb->asoc.out_tsnlog[i].flgs,
4064 			    stcb->asoc.out_tsnlog[i].sz);
4065 		}
4066 	}
4067 	if (stcb->asoc.tsn_out_at) {
4068 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4069 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4070 			    stcb->asoc.out_tsnlog[i].tsn,
4071 			    stcb->asoc.out_tsnlog[i].strm,
4072 			    stcb->asoc.out_tsnlog[i].seq,
4073 			    stcb->asoc.out_tsnlog[i].flgs,
4074 			    stcb->asoc.out_tsnlog[i].sz);
4075 		}
4076 	}
4077 #endif
4078 }
4079 
4080 #endif
4081 
4082 void
4083 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4084     struct mbuf *op_err,
4085     int so_locked
4086 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4087     SCTP_UNUSED
4088 #endif
4089 )
4090 {
4091 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4092 	struct socket *so;
4093 
4094 #endif
4095 
4096 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4097 	so = SCTP_INP_SO(inp);
4098 #endif
4099 	if (stcb == NULL) {
4100 		/* Got to have a TCB */
4101 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4102 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4103 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4104 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4105 			}
4106 		}
4107 		return;
4108 	} else {
4109 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4110 	}
4111 	/* notify the peer */
4112 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4113 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4114 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4115 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4116 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4117 	}
4118 	/* notify the ulp */
4119 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4120 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4121 	}
4122 	/* now free the asoc */
4123 #ifdef SCTP_ASOCLOG_OF_TSNS
4124 	sctp_print_out_track_log(stcb);
4125 #endif
4126 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4127 	if (!so_locked) {
4128 		atomic_add_int(&stcb->asoc.refcnt, 1);
4129 		SCTP_TCB_UNLOCK(stcb);
4130 		SCTP_SOCKET_LOCK(so, 1);
4131 		SCTP_TCB_LOCK(stcb);
4132 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4133 	}
4134 #endif
4135 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4136 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4137 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4138 	if (!so_locked) {
4139 		SCTP_SOCKET_UNLOCK(so, 1);
4140 	}
4141 #endif
4142 }
4143 
4144 void
4145 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4146     struct sockaddr *src, struct sockaddr *dst,
4147     struct sctphdr *sh, struct sctp_inpcb *inp,
4148     struct mbuf *cause,
4149     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4150     uint32_t vrf_id, uint16_t port)
4151 {
4152 	struct sctp_chunkhdr *ch, chunk_buf;
4153 	unsigned int chk_length;
4154 	int contains_init_chunk;
4155 
4156 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4157 	/* Generate a TO address for future reference */
4158 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4159 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4160 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4161 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4162 		}
4163 	}
4164 	contains_init_chunk = 0;
4165 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4166 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4167 	while (ch != NULL) {
4168 		chk_length = ntohs(ch->chunk_length);
4169 		if (chk_length < sizeof(*ch)) {
4170 			/* break to abort land */
4171 			break;
4172 		}
4173 		switch (ch->chunk_type) {
4174 		case SCTP_INIT:
4175 			contains_init_chunk = 1;
4176 			break;
4177 		case SCTP_PACKET_DROPPED:
4178 			/* we don't respond to pkt-dropped */
4179 			return;
4180 		case SCTP_ABORT_ASSOCIATION:
4181 			/* we don't respond with an ABORT to an ABORT */
4182 			return;
4183 		case SCTP_SHUTDOWN_COMPLETE:
4184 			/*
4185 			 * we ignore it since we are not waiting for it and
4186 			 * peer is gone
4187 			 */
4188 			return;
4189 		case SCTP_SHUTDOWN_ACK:
4190 			sctp_send_shutdown_complete2(src, dst, sh,
4191 			    mflowtype, mflowid, fibnum,
4192 			    vrf_id, port);
4193 			return;
4194 		default:
4195 			break;
4196 		}
4197 		offset += SCTP_SIZE32(chk_length);
4198 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4199 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4200 	}
4201 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4202 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4203 	    (contains_init_chunk == 0))) {
4204 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4205 		    mflowtype, mflowid, fibnum,
4206 		    vrf_id, port);
4207 	}
4208 }
4209 
4210 /*
4211  * check the inbound datagram to make sure there is not an abort inside it,
4212  * if there is return 1, else return 0.
4213  */
4214 int
4215 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4216 {
4217 	struct sctp_chunkhdr *ch;
4218 	struct sctp_init_chunk *init_chk, chunk_buf;
4219 	int offset;
4220 	unsigned int chk_length;
4221 
4222 	offset = iphlen + sizeof(struct sctphdr);
4223 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4224 	    (uint8_t *) & chunk_buf);
4225 	while (ch != NULL) {
4226 		chk_length = ntohs(ch->chunk_length);
4227 		if (chk_length < sizeof(*ch)) {
4228 			/* packet is probably corrupt */
4229 			break;
4230 		}
4231 		/* we seem to be ok, is it an abort? */
4232 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4233 			/* yep, tell them */
4234 			return (1);
4235 		}
4236 		if (ch->chunk_type == SCTP_INITIATION) {
4237 			/* need to update the Vtag */
4238 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4239 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4240 			if (init_chk != NULL) {
4241 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4242 			}
4243 		}
4244 		/* Nope, move to the next chunk */
4245 		offset += SCTP_SIZE32(chk_length);
4246 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4247 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4248 	}
4249 	return (0);
4250 }
4251 
4252 /*
4253  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4254  * set (i.e. it's 0) so, create this function to compare link local scopes
4255  */
4256 #ifdef INET6
4257 uint32_t
4258 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4259 {
4260 	struct sockaddr_in6 a, b;
4261 
4262 	/* save copies */
4263 	a = *addr1;
4264 	b = *addr2;
4265 
4266 	if (a.sin6_scope_id == 0)
4267 		if (sa6_recoverscope(&a)) {
4268 			/* can't get scope, so can't match */
4269 			return (0);
4270 		}
4271 	if (b.sin6_scope_id == 0)
4272 		if (sa6_recoverscope(&b)) {
4273 			/* can't get scope, so can't match */
4274 			return (0);
4275 		}
4276 	if (a.sin6_scope_id != b.sin6_scope_id)
4277 		return (0);
4278 
4279 	return (1);
4280 }
4281 
4282 /*
4283  * returns a sockaddr_in6 with embedded scope recovered and removed
4284  */
4285 struct sockaddr_in6 *
4286 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4287 {
4288 	/* check and strip embedded scope junk */
4289 	if (addr->sin6_family == AF_INET6) {
4290 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4291 			if (addr->sin6_scope_id == 0) {
4292 				*store = *addr;
4293 				if (!sa6_recoverscope(store)) {
4294 					/* use the recovered scope */
4295 					addr = store;
4296 				}
4297 			} else {
4298 				/* else, return the original "to" addr */
4299 				in6_clearscope(&addr->sin6_addr);
4300 			}
4301 		}
4302 	}
4303 	return (addr);
4304 }
4305 
4306 #endif
4307 
4308 /*
4309  * are the two addresses the same?  currently a "scopeless" check returns: 1
4310  * if same, 0 if not
4311  */
4312 int
4313 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4314 {
4315 
4316 	/* must be valid */
4317 	if (sa1 == NULL || sa2 == NULL)
4318 		return (0);
4319 
4320 	/* must be the same family */
4321 	if (sa1->sa_family != sa2->sa_family)
4322 		return (0);
4323 
4324 	switch (sa1->sa_family) {
4325 #ifdef INET6
4326 	case AF_INET6:
4327 		{
4328 			/* IPv6 addresses */
4329 			struct sockaddr_in6 *sin6_1, *sin6_2;
4330 
4331 			sin6_1 = (struct sockaddr_in6 *)sa1;
4332 			sin6_2 = (struct sockaddr_in6 *)sa2;
4333 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4334 			    sin6_2));
4335 		}
4336 #endif
4337 #ifdef INET
4338 	case AF_INET:
4339 		{
4340 			/* IPv4 addresses */
4341 			struct sockaddr_in *sin_1, *sin_2;
4342 
4343 			sin_1 = (struct sockaddr_in *)sa1;
4344 			sin_2 = (struct sockaddr_in *)sa2;
4345 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4346 		}
4347 #endif
4348 	default:
4349 		/* we don't do these... */
4350 		return (0);
4351 	}
4352 }
4353 
4354 void
4355 sctp_print_address(struct sockaddr *sa)
4356 {
4357 #ifdef INET6
4358 	char ip6buf[INET6_ADDRSTRLEN];
4359 
4360 #endif
4361 
4362 	switch (sa->sa_family) {
4363 #ifdef INET6
4364 	case AF_INET6:
4365 		{
4366 			struct sockaddr_in6 *sin6;
4367 
4368 			sin6 = (struct sockaddr_in6 *)sa;
4369 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4370 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4371 			    ntohs(sin6->sin6_port),
4372 			    sin6->sin6_scope_id);
4373 			break;
4374 		}
4375 #endif
4376 #ifdef INET
4377 	case AF_INET:
4378 		{
4379 			struct sockaddr_in *sin;
4380 			unsigned char *p;
4381 
4382 			sin = (struct sockaddr_in *)sa;
4383 			p = (unsigned char *)&sin->sin_addr;
4384 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4385 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4386 			break;
4387 		}
4388 #endif
4389 	default:
4390 		SCTP_PRINTF("?\n");
4391 		break;
4392 	}
4393 }
4394 
4395 void
4396 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4397     struct sctp_inpcb *new_inp,
4398     struct sctp_tcb *stcb,
4399     int waitflags)
4400 {
4401 	/*
4402 	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.
4404 	 */
4405 	struct socket *old_so, *new_so;
4406 	struct sctp_queued_to_read *control, *nctl;
4407 	struct sctp_readhead tmp_queue;
4408 	struct mbuf *m;
4409 	int error = 0;
4410 
4411 	old_so = old_inp->sctp_socket;
4412 	new_so = new_inp->sctp_socket;
4413 	TAILQ_INIT(&tmp_queue);
4414 	error = sblock(&old_so->so_rcv, waitflags);
4415 	if (error) {
4416 		/*
4417 		 * Gak, can't get sblock, we have a problem. data will be
4418 		 * left stranded.. and we don't dare look at it since the
4419 		 * other thread may be reading something. Oh well, its a
4420 		 * screwed up app that does a peeloff OR a accept while
4421 		 * reading from the main socket... actually its only the
4422 		 * peeloff() case, since I think read will fail on a
4423 		 * listening socket..
4424 		 */
4425 		return;
4426 	}
4427 	/* lock the socket buffers */
4428 	SCTP_INP_READ_LOCK(old_inp);
4429 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for our target stcb */
4431 		if (control->stcb == stcb) {
			/* remove it, we want it */
4433 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4434 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4435 			m = control->data;
4436 			while (m) {
4437 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4438 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4439 				}
4440 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4441 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4442 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4443 				}
4444 				m = SCTP_BUF_NEXT(m);
4445 			}
4446 		}
4447 	}
4448 	SCTP_INP_READ_UNLOCK(old_inp);
4449 	/* Remove the sb-lock on the old socket */
4450 
4451 	sbunlock(&old_so->so_rcv);
4452 	/* Now we move them over to the new socket buffer */
4453 	SCTP_INP_READ_LOCK(new_inp);
4454 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4455 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4456 		m = control->data;
4457 		while (m) {
4458 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4459 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4460 			}
4461 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4462 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4463 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4464 			}
4465 			m = SCTP_BUF_NEXT(m);
4466 		}
4467 	}
4468 	SCTP_INP_READ_UNLOCK(new_inp);
4469 }
4470 
4471 void
4472 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4473     struct sctp_tcb *stcb,
4474     int so_locked
4475 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4476     SCTP_UNUSED
4477 #endif
4478 )
4479 {
4480 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4481 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4482 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4483 		} else {
4484 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4485 			struct socket *so;
4486 
4487 			so = SCTP_INP_SO(inp);
4488 			if (!so_locked) {
4489 				if (stcb) {
4490 					atomic_add_int(&stcb->asoc.refcnt, 1);
4491 					SCTP_TCB_UNLOCK(stcb);
4492 				}
4493 				SCTP_SOCKET_LOCK(so, 1);
4494 				if (stcb) {
4495 					SCTP_TCB_LOCK(stcb);
4496 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4497 				}
4498 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4499 					SCTP_SOCKET_UNLOCK(so, 1);
4500 					return;
4501 				}
4502 			}
4503 #endif
4504 			sctp_sorwakeup(inp, inp->sctp_socket);
4505 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4506 			if (!so_locked) {
4507 				SCTP_SOCKET_UNLOCK(so, 1);
4508 			}
4509 #endif
4510 		}
4511 	}
4512 }
4513 
4514 void
4515 sctp_add_to_readq(struct sctp_inpcb *inp,
4516     struct sctp_tcb *stcb,
4517     struct sctp_queued_to_read *control,
4518     struct sockbuf *sb,
4519     int end,
4520     int inp_read_lock_held,
4521     int so_locked
4522 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4523     SCTP_UNUSED
4524 #endif
4525 )
4526 {
4527 	/*
4528 	 * Here we must place the control on the end of the socket read
4529 	 * queue AND increment sb_cc so that select will work properly on
4530 	 * read.
4531 	 */
4532 	struct mbuf *m, *prev = NULL;
4533 
4534 	if (inp == NULL) {
4535 		/* Gak, TSNH!! */
4536 #ifdef INVARIANTS
4537 		panic("Gak, inp NULL on add_to_readq");
4538 #endif
4539 		return;
4540 	}
4541 	if (inp_read_lock_held == 0)
4542 		SCTP_INP_READ_LOCK(inp);
4543 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4544 		sctp_free_remote_addr(control->whoFrom);
4545 		if (control->data) {
4546 			sctp_m_freem(control->data);
4547 			control->data = NULL;
4548 		}
4549 		sctp_free_a_readq(stcb, control);
4550 		if (inp_read_lock_held == 0)
4551 			SCTP_INP_READ_UNLOCK(inp);
4552 		return;
4553 	}
4554 	if (!(control->spec_flags & M_NOTIFICATION)) {
4555 		atomic_add_int(&inp->total_recvs, 1);
4556 		if (!control->do_not_ref_stcb) {
4557 			atomic_add_int(&stcb->total_recvs, 1);
4558 		}
4559 	}
4560 	m = control->data;
4561 	control->held_length = 0;
4562 	control->length = 0;
4563 	while (m) {
4564 		if (SCTP_BUF_LEN(m) == 0) {
4565 			/* Skip mbufs with NO length */
4566 			if (prev == NULL) {
4567 				/* First one */
4568 				control->data = sctp_m_free(m);
4569 				m = control->data;
4570 			} else {
4571 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4572 				m = SCTP_BUF_NEXT(prev);
4573 			}
4574 			if (m == NULL) {
4575 				control->tail_mbuf = prev;
4576 			}
4577 			continue;
4578 		}
4579 		prev = m;
4580 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4581 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4582 		}
4583 		sctp_sballoc(stcb, sb, m);
4584 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4585 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4586 		}
4587 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4588 		m = SCTP_BUF_NEXT(m);
4589 	}
4590 	if (prev != NULL) {
4591 		control->tail_mbuf = prev;
4592 	} else {
4593 		/* Everything got collapsed out?? */
4594 		sctp_free_remote_addr(control->whoFrom);
4595 		sctp_free_a_readq(stcb, control);
4596 		if (inp_read_lock_held == 0)
4597 			SCTP_INP_READ_UNLOCK(inp);
4598 		return;
4599 	}
4600 	if (end) {
4601 		control->end_added = 1;
4602 	}
4603 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4604 	control->on_read_q = 1;
4605 	if (inp_read_lock_held == 0)
4606 		SCTP_INP_READ_UNLOCK(inp);
4607 	if (inp && inp->sctp_socket) {
4608 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
4609 	}
4610 }
4611 
4612 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4613  *************ALTERNATE ROUTING CODE
4614  */
4615 
4616 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4617  *************ALTERNATE ROUTING CODE
4618  */
4619 
4620 struct mbuf *
4621 sctp_generate_cause(uint16_t code, char *info)
4622 {
4623 	struct mbuf *m;
4624 	struct sctp_gen_error_cause *cause;
4625 	size_t info_len;
4626 	uint16_t len;
4627 
4628 	if ((code == 0) || (info == NULL)) {
4629 		return (NULL);
4630 	}
4631 	info_len = strlen(info);
4632 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4633 		return (NULL);
4634 	}
4635 	len = (uint16_t) (sizeof(struct sctp_paramhdr) + info_len);
4636 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4637 	if (m != NULL) {
4638 		SCTP_BUF_LEN(m) = len;
4639 		cause = mtod(m, struct sctp_gen_error_cause *);
4640 		cause->code = htons(code);
4641 		cause->length = htons(len);
4642 		memcpy(cause->info, info, info_len);
4643 	}
4644 	return (m);
4645 }
4646 
4647 struct mbuf *
4648 sctp_generate_no_user_data_cause(uint32_t tsn)
4649 {
4650 	struct mbuf *m;
4651 	struct sctp_error_no_user_data *no_user_data_cause;
4652 	uint16_t len;
4653 
4654 	len = (uint16_t) sizeof(struct sctp_error_no_user_data);
4655 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4656 	if (m != NULL) {
4657 		SCTP_BUF_LEN(m) = len;
4658 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4659 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4660 		no_user_data_cause->cause.length = htons(len);
4661 		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4662 	}
4663 	return (m);
4664 }
4665 
4666 #ifdef SCTP_MBCNT_LOGGING
4667 void
4668 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4669     struct sctp_tmit_chunk *tp1, int chk_cnt)
4670 {
4671 	if (tp1->data == NULL) {
4672 		return;
4673 	}
4674 	asoc->chunks_on_out_queue -= chk_cnt;
4675 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4676 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4677 		    asoc->total_output_queue_size,
4678 		    tp1->book_size,
4679 		    0,
4680 		    tp1->mbcnt);
4681 	}
4682 	if (asoc->total_output_queue_size >= tp1->book_size) {
4683 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4684 	} else {
4685 		asoc->total_output_queue_size = 0;
4686 	}
4687 
4688 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4689 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4690 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4691 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4692 		} else {
4693 			stcb->sctp_socket->so_snd.sb_cc = 0;
4694 
4695 		}
4696 	}
4697 }
4698 
4699 #endif
4700 
4701 int
4702 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4703     uint8_t sent, int so_locked
4704 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4705     SCTP_UNUSED
4706 #endif
4707 )
4708 {
4709 	struct sctp_stream_out *strq;
4710 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4711 	struct sctp_stream_queue_pending *sp;
4712 	uint16_t stream = 0, seq = 0;
4713 	uint8_t foundeom = 0;
4714 	int ret_sz = 0;
4715 	int notdone;
4716 	int do_wakeup_routine = 0;
4717 
4718 	stream = tp1->rec.data.stream_number;
4719 	seq = tp1->rec.data.stream_seq;
4720 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4721 		stcb->asoc.abandoned_sent[0]++;
4722 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4723 		stcb->asoc.strmout[stream].abandoned_sent[0]++;
4724 #if defined(SCTP_DETAILED_STR_STATS)
4725 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4726 #endif
4727 	} else {
4728 		stcb->asoc.abandoned_unsent[0]++;
4729 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4730 		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
4731 #if defined(SCTP_DETAILED_STR_STATS)
4732 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4733 #endif
4734 	}
4735 	do {
4736 		ret_sz += tp1->book_size;
4737 		if (tp1->data != NULL) {
4738 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4739 				sctp_flight_size_decrease(tp1);
4740 				sctp_total_flight_decrease(stcb, tp1);
4741 			}
4742 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4743 			stcb->asoc.peers_rwnd += tp1->send_size;
4744 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4745 			if (sent) {
4746 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4747 			} else {
4748 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4749 			}
4750 			if (tp1->data) {
4751 				sctp_m_freem(tp1->data);
4752 				tp1->data = NULL;
4753 			}
4754 			do_wakeup_routine = 1;
4755 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4756 				stcb->asoc.sent_queue_cnt_removeable--;
4757 			}
4758 		}
4759 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4760 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4761 		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed, we are done */
4763 			notdone = 0;
4764 			foundeom = 1;
4765 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4766 			/* end of frag, we are done */
4767 			notdone = 0;
4768 			foundeom = 1;
4769 		} else {
4770 			/*
4771 			 * Its a begin or middle piece, we must mark all of
4772 			 * it
4773 			 */
4774 			notdone = 1;
4775 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4776 		}
4777 	} while (tp1 && notdone);
4778 	if (foundeom == 0) {
4779 		/*
4780 		 * The multi-part message was scattered across the send and
4781 		 * sent queue.
4782 		 */
4783 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4784 			if ((tp1->rec.data.stream_number != stream) ||
4785 			    (tp1->rec.data.stream_seq != seq)) {
4786 				break;
4787 			}
4788 			/*
4789 			 * save to chk in case we have some on stream out
4790 			 * queue. If so and we have an un-transmitted one we
4791 			 * don't have to fudge the TSN.
4792 			 */
4793 			chk = tp1;
4794 			ret_sz += tp1->book_size;
4795 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4796 			if (sent) {
4797 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4798 			} else {
4799 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4800 			}
4801 			if (tp1->data) {
4802 				sctp_m_freem(tp1->data);
4803 				tp1->data = NULL;
4804 			}
			/* No flight involved here, book the size to 0 */
4806 			tp1->book_size = 0;
4807 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4808 				foundeom = 1;
4809 			}
4810 			do_wakeup_routine = 1;
4811 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4812 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4813 			/*
4814 			 * on to the sent queue so we can wait for it to be
4815 			 * passed by.
4816 			 */
4817 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4818 			    sctp_next);
4819 			stcb->asoc.send_queue_cnt--;
4820 			stcb->asoc.sent_queue_cnt++;
4821 		}
4822 	}
4823 	if (foundeom == 0) {
4824 		/*
4825 		 * Still no eom found. That means there is stuff left on the
4826 		 * stream out queue.. yuck.
4827 		 */
4828 		SCTP_TCB_SEND_LOCK(stcb);
4829 		strq = &stcb->asoc.strmout[stream];
4830 		sp = TAILQ_FIRST(&strq->outqueue);
4831 		if (sp != NULL) {
4832 			sp->discard_rest = 1;
4833 			/*
4834 			 * We may need to put a chunk on the queue that
4835 			 * holds the TSN that would have been sent with the
4836 			 * LAST bit.
4837 			 */
4838 			if (chk == NULL) {
4839 				/* Yep, we have to */
4840 				sctp_alloc_a_chunk(stcb, chk);
4841 				if (chk == NULL) {
4842 					/*
4843 					 * we are hosed. All we can do is
4844 					 * nothing.. which will cause an
4845 					 * abort if the peer is paying
4846 					 * attention.
4847 					 */
4848 					goto oh_well;
4849 				}
4850 				memset(chk, 0, sizeof(*chk));
4851 				chk->rec.data.rcv_flags = 0;
4852 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4853 				chk->asoc = &stcb->asoc;
4854 				if (stcb->asoc.idata_supported == 0) {
4855 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4856 						chk->rec.data.stream_seq = 0;
4857 					} else {
4858 						chk->rec.data.stream_seq = strq->next_mid_ordered;
4859 					}
4860 				} else {
4861 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4862 						chk->rec.data.stream_seq = strq->next_mid_unordered;
4863 					} else {
4864 						chk->rec.data.stream_seq = strq->next_mid_ordered;
4865 					}
4866 				}
4867 				chk->rec.data.stream_number = sp->stream;
4868 				chk->rec.data.payloadtype = sp->ppid;
4869 				chk->rec.data.context = sp->context;
4870 				chk->flags = sp->act_flags;
4871 				chk->whoTo = NULL;
4872 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4873 				strq->chunks_on_queues++;
4874 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4875 				stcb->asoc.sent_queue_cnt++;
4876 				stcb->asoc.pr_sctp_cnt++;
4877 			}
4878 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4879 			if (sp->sinfo_flags & SCTP_UNORDERED) {
4880 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4881 			}
4882 			if (stcb->asoc.idata_supported == 0) {
4883 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4884 					strq->next_mid_ordered++;
4885 				}
4886 			} else {
4887 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4888 					strq->next_mid_unordered++;
4889 				} else {
4890 					strq->next_mid_ordered++;
4891 				}
4892 			}
4893 	oh_well:
4894 			if (sp->data) {
4895 				/*
4896 				 * Pull any data to free up the SB and allow
4897 				 * sender to "add more" while we will throw
4898 				 * away :-)
4899 				 */
4900 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4901 				ret_sz += sp->length;
4902 				do_wakeup_routine = 1;
4903 				sp->some_taken = 1;
4904 				sctp_m_freem(sp->data);
4905 				sp->data = NULL;
4906 				sp->tail_mbuf = NULL;
4907 				sp->length = 0;
4908 			}
4909 		}
4910 		SCTP_TCB_SEND_UNLOCK(stcb);
4911 	}
4912 	if (do_wakeup_routine) {
4913 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4914 		struct socket *so;
4915 
4916 		so = SCTP_INP_SO(stcb->sctp_ep);
4917 		if (!so_locked) {
4918 			atomic_add_int(&stcb->asoc.refcnt, 1);
4919 			SCTP_TCB_UNLOCK(stcb);
4920 			SCTP_SOCKET_LOCK(so, 1);
4921 			SCTP_TCB_LOCK(stcb);
4922 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4923 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4924 				/* assoc was freed while we were unlocked */
4925 				SCTP_SOCKET_UNLOCK(so, 1);
4926 				return (ret_sz);
4927 			}
4928 		}
4929 #endif
4930 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4931 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4932 		if (!so_locked) {
4933 			SCTP_SOCKET_UNLOCK(so, 1);
4934 		}
4935 #endif
4936 	}
4937 	return (ret_sz);
4938 }
4939 
4940 /*
4941  * checks to see if the given address, sa, is one that is currently known by
4942  * the kernel note: can't distinguish the same address on multiple interfaces
4943  * and doesn't handle multiple addresses with different zone/scope id's note:
4944  * ifa_ifwithaddr() compares the entire sockaddr struct
4945  */
4946 struct sctp_ifa *
4947 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4948     int holds_lock)
4949 {
4950 	struct sctp_laddr *laddr;
4951 
4952 	if (holds_lock == 0) {
4953 		SCTP_INP_RLOCK(inp);
4954 	}
4955 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4956 		if (laddr->ifa == NULL)
4957 			continue;
4958 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4959 			continue;
4960 #ifdef INET
4961 		if (addr->sa_family == AF_INET) {
4962 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4963 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4964 				/* found him. */
4965 				if (holds_lock == 0) {
4966 					SCTP_INP_RUNLOCK(inp);
4967 				}
4968 				return (laddr->ifa);
4969 				break;
4970 			}
4971 		}
4972 #endif
4973 #ifdef INET6
4974 		if (addr->sa_family == AF_INET6) {
4975 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4976 			    &laddr->ifa->address.sin6)) {
4977 				/* found him. */
4978 				if (holds_lock == 0) {
4979 					SCTP_INP_RUNLOCK(inp);
4980 				}
4981 				return (laddr->ifa);
4982 				break;
4983 			}
4984 		}
4985 #endif
4986 	}
4987 	if (holds_lock == 0) {
4988 		SCTP_INP_RUNLOCK(inp);
4989 	}
4990 	return (NULL);
4991 }
4992 
4993 uint32_t
4994 sctp_get_ifa_hash_val(struct sockaddr *addr)
4995 {
4996 	switch (addr->sa_family) {
4997 #ifdef INET
4998 	case AF_INET:
4999 		{
5000 			struct sockaddr_in *sin;
5001 
5002 			sin = (struct sockaddr_in *)addr;
5003 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5004 		}
5005 #endif
5006 #ifdef INET6
5007 	case AF_INET6:
5008 		{
5009 			struct sockaddr_in6 *sin6;
5010 			uint32_t hash_of_addr;
5011 
5012 			sin6 = (struct sockaddr_in6 *)addr;
5013 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5014 			    sin6->sin6_addr.s6_addr32[1] +
5015 			    sin6->sin6_addr.s6_addr32[2] +
5016 			    sin6->sin6_addr.s6_addr32[3]);
5017 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5018 			return (hash_of_addr);
5019 		}
5020 #endif
5021 	default:
5022 		break;
5023 	}
5024 	return (0);
5025 }
5026 
5027 struct sctp_ifa *
5028 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5029 {
5030 	struct sctp_ifa *sctp_ifap;
5031 	struct sctp_vrf *vrf;
5032 	struct sctp_ifalist *hash_head;
5033 	uint32_t hash_of_addr;
5034 
5035 	if (holds_lock == 0)
5036 		SCTP_IPI_ADDR_RLOCK();
5037 
5038 	vrf = sctp_find_vrf(vrf_id);
5039 	if (vrf == NULL) {
5040 		if (holds_lock == 0)
5041 			SCTP_IPI_ADDR_RUNLOCK();
5042 		return (NULL);
5043 	}
5044 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5045 
5046 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5047 	if (hash_head == NULL) {
5048 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5049 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5050 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5051 		sctp_print_address(addr);
5052 		SCTP_PRINTF("No such bucket for address\n");
5053 		if (holds_lock == 0)
5054 			SCTP_IPI_ADDR_RUNLOCK();
5055 
5056 		return (NULL);
5057 	}
5058 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5059 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5060 			continue;
5061 #ifdef INET
5062 		if (addr->sa_family == AF_INET) {
5063 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5064 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5065 				/* found him. */
5066 				if (holds_lock == 0)
5067 					SCTP_IPI_ADDR_RUNLOCK();
5068 				return (sctp_ifap);
5069 				break;
5070 			}
5071 		}
5072 #endif
5073 #ifdef INET6
5074 		if (addr->sa_family == AF_INET6) {
5075 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5076 			    &sctp_ifap->address.sin6)) {
5077 				/* found him. */
5078 				if (holds_lock == 0)
5079 					SCTP_IPI_ADDR_RUNLOCK();
5080 				return (sctp_ifap);
5081 				break;
5082 			}
5083 		}
5084 #endif
5085 	}
5086 	if (holds_lock == 0)
5087 		SCTP_IPI_ADDR_RUNLOCK();
5088 	return (NULL);
5089 }
5090 
5091 static void
5092 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
5093     uint32_t rwnd_req)
5094 {
5095 	/* User pulled some data, do we need a rwnd update? */
5096 	int r_unlocked = 0;
5097 	uint32_t dif, rwnd;
5098 	struct socket *so = NULL;
5099 
5100 	if (stcb == NULL)
5101 		return;
5102 
5103 	atomic_add_int(&stcb->asoc.refcnt, 1);
5104 
5105 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5106 	    SCTP_STATE_SHUTDOWN_RECEIVED |
5107 	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check: if we are freeing, no update is needed */
5109 		goto no_lock;
5110 	}
5111 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5112 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5113 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5114 		goto out;
5115 	}
5116 	so = stcb->sctp_socket;
5117 	if (so == NULL) {
5118 		goto out;
5119 	}
5120 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you freed enough to look? */
5122 	*freed_so_far = 0;
	/* Yep, it's worth a look and the lock overhead */
5124 
5125 	/* Figure out what the rwnd would be */
5126 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5127 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5128 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5129 	} else {
5130 		dif = 0;
5131 	}
5132 	if (dif >= rwnd_req) {
5133 		if (hold_rlock) {
5134 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5135 			r_unlocked = 1;
5136 		}
5137 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5138 			/*
5139 			 * One last check before we allow the guy possibly
5140 			 * to get in. There is a race, where the guy has not
5141 			 * reached the gate. In that case
5142 			 */
5143 			goto out;
5144 		}
5145 		SCTP_TCB_LOCK(stcb);
5146 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5147 			/* No reports here */
5148 			SCTP_TCB_UNLOCK(stcb);
5149 			goto out;
5150 		}
5151 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5152 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5153 
5154 		sctp_chunk_output(stcb->sctp_ep, stcb,
5155 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5156 		/* make sure no timer is running */
5157 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5158 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5159 		SCTP_TCB_UNLOCK(stcb);
5160 	} else {
5161 		/* Update how much we have pending */
5162 		stcb->freed_by_sorcv_sincelast = dif;
5163 	}
5164 out:
5165 	if (so && r_unlocked && hold_rlock) {
5166 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5167 	}
5168 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5169 no_lock:
5170 	atomic_add_int(&stcb->asoc.refcnt, -1);
5171 	return;
5172 }
5173 
5174 int
5175 sctp_sorecvmsg(struct socket *so,
5176     struct uio *uio,
5177     struct mbuf **mp,
5178     struct sockaddr *from,
5179     int fromlen,
5180     int *msg_flags,
5181     struct sctp_sndrcvinfo *sinfo,
5182     int filling_sinfo)
5183 {
5184 	/*
5185 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5186 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5187 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5188 	 * On the way out we may send out any combination of:
5189 	 * MSG_NOTIFICATION MSG_EOR
5190 	 *
5191 	 */
5192 	struct sctp_inpcb *inp = NULL;
5193 	int my_len = 0;
5194 	int cp_len = 0, error = 0;
5195 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5196 	struct mbuf *m = NULL;
5197 	struct sctp_tcb *stcb = NULL;
5198 	int wakeup_read_socket = 0;
5199 	int freecnt_applied = 0;
5200 	int out_flags = 0, in_flags = 0;
5201 	int block_allowed = 1;
5202 	uint32_t freed_so_far = 0;
5203 	uint32_t copied_so_far = 0;
5204 	int in_eeor_mode = 0;
5205 	int no_rcv_needed = 0;
5206 	uint32_t rwnd_req = 0;
5207 	int hold_sblock = 0;
5208 	int hold_rlock = 0;
5209 	ssize_t slen = 0;
5210 	uint32_t held_length = 0;
5211 	int sockbuf_lock = 0;
5212 
5213 	if (uio == NULL) {
5214 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5215 		return (EINVAL);
5216 	}
5217 	if (msg_flags) {
5218 		in_flags = *msg_flags;
5219 		if (in_flags & MSG_PEEK)
5220 			SCTP_STAT_INCR(sctps_read_peeks);
5221 	} else {
5222 		in_flags = 0;
5223 	}
5224 	slen = uio->uio_resid;
5225 
5226 	/* Pull in and set up our int flags */
5227 	if (in_flags & MSG_OOB) {
		/* Out-of-band data is NOT supported */
5229 		return (EOPNOTSUPP);
5230 	}
5231 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5232 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5233 		return (EINVAL);
5234 	}
5235 	if ((in_flags & (MSG_DONTWAIT
5236 	    | MSG_NBIO
5237 	    )) ||
5238 	    SCTP_SO_IS_NBIO(so)) {
5239 		block_allowed = 0;
5240 	}
5241 	/* setup the endpoint */
5242 	inp = (struct sctp_inpcb *)so->so_pcb;
5243 	if (inp == NULL) {
5244 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5245 		return (EFAULT);
5246 	}
5247 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
	/* Must be at least an MTU's worth */
5249 	if (rwnd_req < SCTP_MIN_RWND)
5250 		rwnd_req = SCTP_MIN_RWND;
5251 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5252 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5253 		sctp_misc_ints(SCTP_SORECV_ENTER,
5254 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t) uio->uio_resid);
5255 	}
5256 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5257 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5258 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t) uio->uio_resid);
5259 	}
5260 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5261 	if (error) {
5262 		goto release_unlocked;
5263 	}
5264 	sockbuf_lock = 1;
5265 restart:
5266 
5267 
5268 restart_nosblocks:
5269 	if (hold_sblock == 0) {
5270 		SOCKBUF_LOCK(&so->so_rcv);
5271 		hold_sblock = 1;
5272 	}
5273 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5274 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5275 		goto out;
5276 	}
5277 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5278 		if (so->so_error) {
5279 			error = so->so_error;
5280 			if ((in_flags & MSG_PEEK) == 0)
5281 				so->so_error = 0;
5282 			goto out;
5283 		} else {
5284 			if (so->so_rcv.sb_cc == 0) {
5285 				/* indicate EOF */
5286 				error = 0;
5287 				goto out;
5288 			}
5289 		}
5290 	}
5291 	if (so->so_rcv.sb_cc <= held_length) {
5292 		if (so->so_error) {
5293 			error = so->so_error;
5294 			if ((in_flags & MSG_PEEK) == 0) {
5295 				so->so_error = 0;
5296 			}
5297 			goto out;
5298 		}
5299 		if ((so->so_rcv.sb_cc == 0) &&
5300 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5301 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5302 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
				/*
				 * For the active open side, clear the flags
				 * for re-use; a passive open is blocked by
				 * connect.
				 */
5308 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5309 					/*
5310 					 * You were aborted, passive side
5311 					 * always hits here
5312 					 */
5313 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5314 					error = ECONNRESET;
5315 				}
5316 				so->so_state &= ~(SS_ISCONNECTING |
5317 				    SS_ISDISCONNECTING |
5318 				    SS_ISCONFIRMING |
5319 				    SS_ISCONNECTED);
5320 				if (error == 0) {
5321 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5322 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5323 						error = ENOTCONN;
5324 					}
5325 				}
5326 				goto out;
5327 			}
5328 		}
5329 		if (block_allowed) {
5330 			error = sbwait(&so->so_rcv);
5331 			if (error) {
5332 				goto out;
5333 			}
5334 			held_length = 0;
5335 			goto restart_nosblocks;
5336 		} else {
5337 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5338 			error = EWOULDBLOCK;
5339 			goto out;
5340 		}
5341 	}
5342 	if (hold_sblock == 1) {
5343 		SOCKBUF_UNLOCK(&so->so_rcv);
5344 		hold_sblock = 0;
5345 	}
5346 	/* we possibly have data we can read */
5347 	/* sa_ignore FREED_MEMORY */
5348 	control = TAILQ_FIRST(&inp->read_queue);
5349 	if (control == NULL) {
		/*
		 * This could be happening since the appender did the
		 * increment but has not yet done the tailq insert onto the
		 * read_queue.
		 */
5355 		if (hold_rlock == 0) {
5356 			SCTP_INP_READ_LOCK(inp);
5357 		}
5358 		control = TAILQ_FIRST(&inp->read_queue);
5359 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5360 #ifdef INVARIANTS
5361 			panic("Huh, its non zero and nothing on control?");
5362 #endif
5363 			so->so_rcv.sb_cc = 0;
5364 		}
5365 		SCTP_INP_READ_UNLOCK(inp);
5366 		hold_rlock = 0;
5367 		goto restart;
5368 	}
5369 	if ((control->length == 0) &&
5370 	    (control->do_not_ref_stcb)) {
		/*
		 * Clean-up code for freeing an assoc that left behind a
		 * pdapi... maybe a peer in EEOR mode that just closed after
		 * sending and never indicated an EOR.
		 */
5376 		if (hold_rlock == 0) {
5377 			hold_rlock = 1;
5378 			SCTP_INP_READ_LOCK(inp);
5379 		}
5380 		control->held_length = 0;
5381 		if (control->data) {
			/* Hmm, there is data here; fix up the length. */
5383 			struct mbuf *m_tmp;
5384 			int cnt = 0;
5385 
5386 			m_tmp = control->data;
5387 			while (m_tmp) {
5388 				cnt += SCTP_BUF_LEN(m_tmp);
5389 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5390 					control->tail_mbuf = m_tmp;
5391 					control->end_added = 1;
5392 				}
5393 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5394 			}
5395 			control->length = cnt;
5396 		} else {
5397 			/* remove it */
5398 			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hidden data */
5400 			sctp_free_remote_addr(control->whoFrom);
5401 			sctp_free_a_readq(stcb, control);
5402 		}
5403 		if (hold_rlock) {
5404 			hold_rlock = 0;
5405 			SCTP_INP_READ_UNLOCK(inp);
5406 		}
5407 		goto restart;
5408 	}
5409 	if ((control->length == 0) &&
5410 	    (control->end_added == 1)) {
5411 		/*
5412 		 * Do we also need to check for (control->pdapi_aborted ==
5413 		 * 1)?
5414 		 */
5415 		if (hold_rlock == 0) {
5416 			hold_rlock = 1;
5417 			SCTP_INP_READ_LOCK(inp);
5418 		}
5419 		TAILQ_REMOVE(&inp->read_queue, control, next);
5420 		if (control->data) {
5421 #ifdef INVARIANTS
5422 			panic("control->data not null but control->length == 0");
5423 #else
5424 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5425 			sctp_m_freem(control->data);
5426 			control->data = NULL;
5427 #endif
5428 		}
5429 		if (control->aux_data) {
5430 			sctp_m_free(control->aux_data);
5431 			control->aux_data = NULL;
5432 		}
5433 #ifdef INVARIANTS
5434 		if (control->on_strm_q) {
5435 			panic("About to free ctl:%p so:%p and its in %d",
5436 			    control, so, control->on_strm_q);
5437 		}
5438 #endif
5439 		sctp_free_remote_addr(control->whoFrom);
5440 		sctp_free_a_readq(stcb, control);
5441 		if (hold_rlock) {
5442 			hold_rlock = 0;
5443 			SCTP_INP_READ_UNLOCK(inp);
5444 		}
5445 		goto restart;
5446 	}
5447 	if (control->length == 0) {
5448 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5449 		    (filling_sinfo)) {
			/* find a more suitable one than this */
5451 			ctl = TAILQ_NEXT(control, next);
5452 			while (ctl) {
5453 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5454 				    (ctl->some_taken ||
5455 				    (ctl->spec_flags & M_NOTIFICATION) ||
5456 				    ((ctl->do_not_ref_stcb == 0) &&
5457 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5458 				    ) {
					/*-
					 * If we have a different TCB next, and there is data
					 * present. If we have already taken some (pdapi), OR we can
					 * ref the tcb and no delivery has started on this stream, we
					 * take it. Note we allow a notification on a different
					 * assoc to be delivered.
					 */
5466 					control = ctl;
5467 					goto found_one;
5468 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5469 					    (ctl->length) &&
5470 					    ((ctl->some_taken) ||
5471 					    ((ctl->do_not_ref_stcb == 0) &&
5472 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5473 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
					/*-
					 * If we have the same tcb, and there is data present, and we
					 * have the strm interleave feature present. Then if we have
					 * taken some (pdapi) or we can refer to that tcb AND we have
					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notification on the same assoc to
					 * be delivered.
					 */
5482 					control = ctl;
5483 					goto found_one;
5484 				}
5485 				ctl = TAILQ_NEXT(ctl, next);
5486 			}
5487 		}
		/*
		 * If we reach here, no suitable replacement is available
		 * <or> fragment interleave is NOT on. So stuff the sb_cc
		 * into our held count, and it's time to sleep again.
		 */
5493 		held_length = so->so_rcv.sb_cc;
5494 		control->held_length = so->so_rcv.sb_cc;
5495 		goto restart;
5496 	}
5497 	/* Clear the held length since there is something to read */
5498 	control->held_length = 0;
5499 found_one:
	/*
	 * If we reach here, control has some data for us to read off.
	 * Note that stcb COULD be NULL.
	 */
5504 	if (hold_rlock == 0) {
5505 		hold_rlock = 1;
5506 		SCTP_INP_READ_LOCK(inp);
5507 	}
5508 	control->some_taken++;
5509 	stcb = control->stcb;
5510 	if (stcb) {
5511 		if ((control->do_not_ref_stcb == 0) &&
5512 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5513 			if (freecnt_applied == 0)
5514 				stcb = NULL;
5515 		} else if (control->do_not_ref_stcb == 0) {
5516 			/* you can't free it on me please */
5517 			/*
5518 			 * The lock on the socket buffer protects us so the
5519 			 * free code will stop. But since we used the
5520 			 * socketbuf lock and the sender uses the tcb_lock
5521 			 * to increment, we need to use the atomic add to
5522 			 * the refcnt
5523 			 */
5524 			if (freecnt_applied) {
5525 #ifdef INVARIANTS
5526 				panic("refcnt already incremented");
5527 #else
5528 				SCTP_PRINTF("refcnt already incremented?\n");
5529 #endif
5530 			} else {
5531 				atomic_add_int(&stcb->asoc.refcnt, 1);
5532 				freecnt_applied = 1;
5533 			}
5534 			/*
5535 			 * Setup to remember how much we have not yet told
5536 			 * the peer our rwnd has opened up. Note we grab the
5537 			 * value from the tcb from last time. Note too that
5538 			 * sack sending clears this when a sack is sent,
5539 			 * which is fine. Once we hit the rwnd_req, we then
5540 			 * will go to the sctp_user_rcvd() that will not
5541 			 * lock until it KNOWs it MUST send a WUP-SACK.
5542 			 */
5543 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5544 			stcb->freed_by_sorcv_sincelast = 0;
5545 		}
5546 	}
5547 	if (stcb &&
5548 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5549 	    control->do_not_ref_stcb == 0) {
5550 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5551 	}
	/* First, let's copy out the sinfo and sockaddr info */
5553 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5554 		sinfo->sinfo_stream = control->sinfo_stream;
5555 		sinfo->sinfo_ssn = (uint16_t) control->sinfo_ssn;
5556 		sinfo->sinfo_flags = control->sinfo_flags;
5557 		sinfo->sinfo_ppid = control->sinfo_ppid;
5558 		sinfo->sinfo_context = control->sinfo_context;
5559 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5560 		sinfo->sinfo_tsn = control->sinfo_tsn;
5561 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5562 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5563 		nxt = TAILQ_NEXT(control, next);
5564 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5565 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5566 			struct sctp_extrcvinfo *s_extra;
5567 
5568 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5569 			if ((nxt) &&
5570 			    (nxt->length)) {
5571 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5572 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5573 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5574 				}
5575 				if (nxt->spec_flags & M_NOTIFICATION) {
5576 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5577 				}
5578 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5579 				s_extra->serinfo_next_length = nxt->length;
5580 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5581 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5582 				if (nxt->tail_mbuf != NULL) {
5583 					if (nxt->end_added) {
5584 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5585 					}
5586 				}
5587 			} else {
				/*
				 * We explicitly zero this, since the memcpy
				 * may have picked up other things beyond the
				 * older sinfo_ fields that are on the
				 * control's structure.
				 */
5594 				nxt = NULL;
5595 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5596 				s_extra->serinfo_next_aid = 0;
5597 				s_extra->serinfo_next_length = 0;
5598 				s_extra->serinfo_next_ppid = 0;
5599 				s_extra->serinfo_next_stream = 0;
5600 			}
5601 		}
		/*
		 * Update from the real current cum-ack, if we have an stcb.
		 */
5605 		if ((control->do_not_ref_stcb == 0) && stcb)
5606 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5607 		/*
5608 		 * mask off the high bits, we keep the actual chunk bits in
5609 		 * there.
5610 		 */
5611 		sinfo->sinfo_flags &= 0x00ff;
5612 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5613 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5614 		}
5615 	}
5616 #ifdef SCTP_ASOCLOG_OF_TSNS
5617 	{
5618 		int index, newindex;
5619 		struct sctp_pcbtsn_rlog *entry;
5620 
5621 		do {
5622 			index = inp->readlog_index;
5623 			newindex = index + 1;
5624 			if (newindex >= SCTP_READ_LOG_SIZE) {
5625 				newindex = 0;
5626 			}
5627 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5628 		entry = &inp->readlog[index];
5629 		entry->vtag = control->sinfo_assoc_id;
5630 		entry->strm = control->sinfo_stream;
5631 		entry->seq = control->sinfo_ssn;
5632 		entry->sz = control->length;
5633 		entry->flgs = control->sinfo_flags;
5634 	}
5635 #endif
5636 	if ((fromlen > 0) && (from != NULL)) {
5637 		union sctp_sockstore store;
5638 		size_t len;
5639 
5640 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5641 #ifdef INET6
5642 		case AF_INET6:
5643 			len = sizeof(struct sockaddr_in6);
5644 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5645 			store.sin6.sin6_port = control->port_from;
5646 			break;
5647 #endif
5648 #ifdef INET
5649 		case AF_INET:
5650 #ifdef INET6
5651 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5652 				len = sizeof(struct sockaddr_in6);
5653 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5654 				    &store.sin6);
5655 				store.sin6.sin6_port = control->port_from;
5656 			} else {
5657 				len = sizeof(struct sockaddr_in);
5658 				store.sin = control->whoFrom->ro._l_addr.sin;
5659 				store.sin.sin_port = control->port_from;
5660 			}
5661 #else
5662 			len = sizeof(struct sockaddr_in);
5663 			store.sin = control->whoFrom->ro._l_addr.sin;
5664 			store.sin.sin_port = control->port_from;
5665 #endif
5666 			break;
5667 #endif
5668 		default:
5669 			len = 0;
5670 			break;
5671 		}
5672 		memcpy(from, &store, min((size_t)fromlen, len));
5673 #ifdef INET6
5674 		{
5675 			struct sockaddr_in6 lsa6, *from6;
5676 
5677 			from6 = (struct sockaddr_in6 *)from;
5678 			sctp_recover_scope_mac(from6, (&lsa6));
5679 		}
5680 #endif
5681 	}
5682 	if (hold_rlock) {
5683 		SCTP_INP_READ_UNLOCK(inp);
5684 		hold_rlock = 0;
5685 	}
5686 	if (hold_sblock) {
5687 		SOCKBUF_UNLOCK(&so->so_rcv);
5688 		hold_sblock = 0;
5689 	}
5690 	/* now copy out what data we can */
5691 	if (mp == NULL) {
5692 		/* copy out each mbuf in the chain up to length */
5693 get_more_data:
5694 		m = control->data;
5695 		while (m) {
5696 			/* Move out all we can */
5697 			cp_len = (int)uio->uio_resid;
5698 			my_len = (int)SCTP_BUF_LEN(m);
5699 			if (cp_len > my_len) {
5700 				/* not enough in this buf */
5701 				cp_len = my_len;
5702 			}
5703 			if (hold_rlock) {
5704 				SCTP_INP_READ_UNLOCK(inp);
5705 				hold_rlock = 0;
5706 			}
5707 			if (cp_len > 0)
5708 				error = uiomove(mtod(m, char *), cp_len, uio);
5709 			/* re-read */
5710 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5711 				goto release;
5712 			}
5713 			if ((control->do_not_ref_stcb == 0) && stcb &&
5714 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5715 				no_rcv_needed = 1;
5716 			}
5717 			if (error) {
5718 				/* error we are out of here */
5719 				goto release;
5720 			}
5721 			SCTP_INP_READ_LOCK(inp);
5722 			hold_rlock = 1;
5723 			if (cp_len == SCTP_BUF_LEN(m)) {
5724 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5725 				    (control->end_added)) {
5726 					out_flags |= MSG_EOR;
5727 					if ((control->do_not_ref_stcb == 0) &&
5728 					    (control->stcb != NULL) &&
5729 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5730 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5731 				}
5732 				if (control->spec_flags & M_NOTIFICATION) {
5733 					out_flags |= MSG_NOTIFICATION;
5734 				}
5735 				/* we ate up the mbuf */
5736 				if (in_flags & MSG_PEEK) {
5737 					/* just looking */
5738 					m = SCTP_BUF_NEXT(m);
5739 					copied_so_far += cp_len;
5740 				} else {
5741 					/* dispose of the mbuf */
5742 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5743 						sctp_sblog(&so->so_rcv,
5744 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5745 					}
5746 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5747 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5748 						sctp_sblog(&so->so_rcv,
5749 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5750 					}
5751 					copied_so_far += cp_len;
5752 					freed_so_far += cp_len;
5753 					freed_so_far += MSIZE;
5754 					atomic_subtract_int(&control->length, cp_len);
5755 					control->data = sctp_m_free(m);
5756 					m = control->data;
					/*
					 * Been through it all; we must hold
					 * the sb lock, so it is OK to NULL
					 * the tail.
					 */
5761 					if (control->data == NULL) {
5762 #ifdef INVARIANTS
5763 						if ((control->end_added == 0) ||
5764 						    (TAILQ_NEXT(control, next) == NULL)) {
5765 							/*
5766 							 * If the end is not
5767 							 * added, OR the
5768 							 * next is NOT null
5769 							 * we MUST have the
5770 							 * lock.
5771 							 */
5772 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5773 								panic("Hmm we don't own the lock?");
5774 							}
5775 						}
5776 #endif
5777 						control->tail_mbuf = NULL;
5778 #ifdef INVARIANTS
5779 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5780 							panic("end_added, nothing left and no MSG_EOR");
5781 						}
5782 #endif
5783 					}
5784 				}
5785 			} else {
5786 				/* Do we need to trim the mbuf? */
5787 				if (control->spec_flags & M_NOTIFICATION) {
5788 					out_flags |= MSG_NOTIFICATION;
5789 				}
5790 				if ((in_flags & MSG_PEEK) == 0) {
5791 					SCTP_BUF_RESV_UF(m, cp_len);
5792 					SCTP_BUF_LEN(m) -= cp_len;
5793 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5794 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5795 					}
5796 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5797 					if ((control->do_not_ref_stcb == 0) &&
5798 					    stcb) {
5799 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5800 					}
5801 					copied_so_far += cp_len;
5802 					freed_so_far += cp_len;
5803 					freed_so_far += MSIZE;
5804 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5805 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5806 						    SCTP_LOG_SBRESULT, 0);
5807 					}
5808 					atomic_subtract_int(&control->length, cp_len);
5809 				} else {
5810 					copied_so_far += cp_len;
5811 				}
5812 			}
5813 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5814 				break;
5815 			}
5816 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5817 			    (control->do_not_ref_stcb == 0) &&
5818 			    (freed_so_far >= rwnd_req)) {
5819 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5820 			}
5821 		}		/* end while(m) */
		/*
		 * At this point we have looked at it all; we either have a
		 * MSG_EOR, or we have read all the user wants, <or>
		 * control->length == 0.
		 */
5827 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5828 			/* we are done with this control */
5829 			if (control->length == 0) {
5830 				if (control->data) {
5831 #ifdef INVARIANTS
5832 					panic("control->data not null at read eor?");
5833 #else
					SCTP_PRINTF("Strange, data left in the control buffer. Invariants would panic?\n");
5835 					sctp_m_freem(control->data);
5836 					control->data = NULL;
5837 #endif
5838 				}
5839 		done_with_control:
5840 				if (hold_rlock == 0) {
5841 					SCTP_INP_READ_LOCK(inp);
5842 					hold_rlock = 1;
5843 				}
5844 				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hidden data */
5846 				if (control->held_length) {
5847 					held_length = 0;
5848 					control->held_length = 0;
5849 					wakeup_read_socket = 1;
5850 				}
5851 				if (control->aux_data) {
5852 					sctp_m_free(control->aux_data);
5853 					control->aux_data = NULL;
5854 				}
5855 				no_rcv_needed = control->do_not_ref_stcb;
5856 				sctp_free_remote_addr(control->whoFrom);
5857 				control->data = NULL;
5858 #ifdef INVARIANTS
5859 				if (control->on_strm_q) {
5860 					panic("About to free ctl:%p so:%p and its in %d",
5861 					    control, so, control->on_strm_q);
5862 				}
5863 #endif
5864 				sctp_free_a_readq(stcb, control);
5865 				control = NULL;
5866 				if ((freed_so_far >= rwnd_req) &&
5867 				    (no_rcv_needed == 0))
5868 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5869 
5870 			} else {
5871 				/*
5872 				 * The user did not read all of this
5873 				 * message, turn off the returned MSG_EOR
5874 				 * since we are leaving more behind on the
5875 				 * control to read.
5876 				 */
5877 #ifdef INVARIANTS
5878 				if (control->end_added &&
5879 				    (control->data == NULL) &&
5880 				    (control->tail_mbuf == NULL)) {
5881 					panic("Gak, control->length is corrupt?");
5882 				}
5883 #endif
5884 				no_rcv_needed = control->do_not_ref_stcb;
5885 				out_flags &= ~MSG_EOR;
5886 			}
5887 		}
5888 		if (out_flags & MSG_EOR) {
5889 			goto release;
5890 		}
5891 		if ((uio->uio_resid == 0) ||
5892 		    ((in_eeor_mode) &&
5893 		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5894 			goto release;
5895 		}
		/*
		 * If we hit here the receiver wants more and this message is
		 * NOT done (pd-api). So two questions: Can we block? If not,
		 * we are done. Did the user NOT set MSG_WAITALL?
		 */
5901 		if (block_allowed == 0) {
5902 			goto release;
5903 		}
		/*
		 * We need to wait for more data. A few things:
		 * - We don't sbunlock() so we don't get someone else
		 *   reading.
		 * - We must be sure to account for the case where what is
		 *   added is NOT to our control when we wake up.
		 */
5910 
5911 		/*
5912 		 * Do we need to tell the transport a rwnd update might be
5913 		 * needed before we go to sleep?
5914 		 */
5915 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5916 		    ((freed_so_far >= rwnd_req) &&
5917 		    (control->do_not_ref_stcb == 0) &&
5918 		    (no_rcv_needed == 0))) {
5919 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5920 		}
5921 wait_some_more:
5922 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5923 			goto release;
5924 		}
5925 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5926 			goto release;
5927 
5928 		if (hold_rlock == 1) {
5929 			SCTP_INP_READ_UNLOCK(inp);
5930 			hold_rlock = 0;
5931 		}
5932 		if (hold_sblock == 0) {
5933 			SOCKBUF_LOCK(&so->so_rcv);
5934 			hold_sblock = 1;
5935 		}
5936 		if ((copied_so_far) && (control->length == 0) &&
5937 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5938 			goto release;
5939 		}
5940 		if (so->so_rcv.sb_cc <= control->held_length) {
5941 			error = sbwait(&so->so_rcv);
5942 			if (error) {
5943 				goto release;
5944 			}
5945 			control->held_length = 0;
5946 		}
5947 		if (hold_sblock) {
5948 			SOCKBUF_UNLOCK(&so->so_rcv);
5949 			hold_sblock = 0;
5950 		}
5951 		if (control->length == 0) {
5952 			/* still nothing here */
5953 			if (control->end_added == 1) {
				/* He aborted or is done, i.e. did a shutdown */
5955 				out_flags |= MSG_EOR;
5956 				if (control->pdapi_aborted) {
5957 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5958 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5959 
5960 					out_flags |= MSG_TRUNC;
5961 				} else {
5962 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5963 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5964 				}
5965 				goto done_with_control;
5966 			}
5967 			if (so->so_rcv.sb_cc > held_length) {
5968 				control->held_length = so->so_rcv.sb_cc;
5969 				held_length = 0;
5970 			}
5971 			goto wait_some_more;
5972 		} else if (control->data == NULL) {
5973 			/*
5974 			 * we must re-sync since data is probably being
5975 			 * added
5976 			 */
5977 			SCTP_INP_READ_LOCK(inp);
5978 			if ((control->length > 0) && (control->data == NULL)) {
				/*
				 * Big trouble... we have the lock and it's
				 * corrupt?
				 */
5983 #ifdef INVARIANTS
5984 				panic("Impossible data==NULL length !=0");
5985 #endif
5986 				out_flags |= MSG_EOR;
5987 				out_flags |= MSG_TRUNC;
5988 				control->length = 0;
5989 				SCTP_INP_READ_UNLOCK(inp);
5990 				goto done_with_control;
5991 			}
5992 			SCTP_INP_READ_UNLOCK(inp);
			/* We will fall through to get more data */
5994 		}
5995 		goto get_more_data;
5996 	} else {
5997 		/*-
5998 		 * Give caller back the mbuf chain,
5999 		 * store in uio_resid the length
6000 		 */
6001 		wakeup_read_socket = 0;
6002 		if ((control->end_added == 0) ||
6003 		    (TAILQ_NEXT(control, next) == NULL)) {
6004 			/* Need to get rlock */
6005 			if (hold_rlock == 0) {
6006 				SCTP_INP_READ_LOCK(inp);
6007 				hold_rlock = 1;
6008 			}
6009 		}
6010 		if (control->end_added) {
6011 			out_flags |= MSG_EOR;
6012 			if ((control->do_not_ref_stcb == 0) &&
6013 			    (control->stcb != NULL) &&
6014 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6015 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6016 		}
6017 		if (control->spec_flags & M_NOTIFICATION) {
6018 			out_flags |= MSG_NOTIFICATION;
6019 		}
6020 		uio->uio_resid = control->length;
6021 		*mp = control->data;
6022 		m = control->data;
6023 		while (m) {
6024 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6025 				sctp_sblog(&so->so_rcv,
6026 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6027 			}
6028 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6029 			freed_so_far += SCTP_BUF_LEN(m);
6030 			freed_so_far += MSIZE;
6031 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6032 				sctp_sblog(&so->so_rcv,
6033 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6034 			}
6035 			m = SCTP_BUF_NEXT(m);
6036 		}
6037 		control->data = control->tail_mbuf = NULL;
6038 		control->length = 0;
6039 		if (out_flags & MSG_EOR) {
6040 			/* Done with this control */
6041 			goto done_with_control;
6042 		}
6043 	}
6044 release:
6045 	if (hold_rlock == 1) {
6046 		SCTP_INP_READ_UNLOCK(inp);
6047 		hold_rlock = 0;
6048 	}
6049 	if (hold_sblock == 1) {
6050 		SOCKBUF_UNLOCK(&so->so_rcv);
6051 		hold_sblock = 0;
6052 	}
6053 	sbunlock(&so->so_rcv);
6054 	sockbuf_lock = 0;
6055 
6056 release_unlocked:
6057 	if (hold_sblock) {
6058 		SOCKBUF_UNLOCK(&so->so_rcv);
6059 		hold_sblock = 0;
6060 	}
6061 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6062 		if ((freed_so_far >= rwnd_req) &&
6063 		    (control && (control->do_not_ref_stcb == 0)) &&
6064 		    (no_rcv_needed == 0))
6065 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6066 	}
6067 out:
6068 	if (msg_flags) {
6069 		*msg_flags = out_flags;
6070 	}
6071 	if (((out_flags & MSG_EOR) == 0) &&
6072 	    ((in_flags & MSG_PEEK) == 0) &&
6073 	    (sinfo) &&
6074 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6075 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6076 		struct sctp_extrcvinfo *s_extra;
6077 
6078 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6079 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6080 	}
6081 	if (hold_rlock == 1) {
6082 		SCTP_INP_READ_UNLOCK(inp);
6083 	}
6084 	if (hold_sblock) {
6085 		SOCKBUF_UNLOCK(&so->so_rcv);
6086 	}
6087 	if (sockbuf_lock) {
6088 		sbunlock(&so->so_rcv);
6089 	}
6090 	if (freecnt_applied) {
6091 		/*
6092 		 * The lock on the socket buffer protects us so the free
6093 		 * code will stop. But since we used the socketbuf lock and
6094 		 * the sender uses the tcb_lock to increment, we need to use
6095 		 * the atomic add to the refcnt.
6096 		 */
6097 		if (stcb == NULL) {
6098 #ifdef INVARIANTS
6099 			panic("stcb for refcnt has gone NULL?");
6100 			goto stage_left;
6101 #else
6102 			goto stage_left;
6103 #endif
6104 		}
6105 		/* Save the value back for next time */
6106 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6107 		atomic_add_int(&stcb->asoc.refcnt, -1);
6108 	}
6109 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6110 		if (stcb) {
6111 			sctp_misc_ints(SCTP_SORECV_DONE,
6112 			    freed_so_far,
6113 			    (uint32_t) ((uio) ? (slen - uio->uio_resid) : slen),
6114 			    stcb->asoc.my_rwnd,
6115 			    so->so_rcv.sb_cc);
6116 		} else {
6117 			sctp_misc_ints(SCTP_SORECV_DONE,
6118 			    freed_so_far,
6119 			    (uint32_t) ((uio) ? (slen - uio->uio_resid) : slen),
6120 			    0,
6121 			    so->so_rcv.sb_cc);
6122 		}
6123 	}
6124 stage_left:
6125 	if (wakeup_read_socket) {
6126 		sctp_sorwakeup(inp, so);
6127 	}
6128 	return (error);
6129 }
6130 
6131 
6132 #ifdef SCTP_MBUF_LOGGING
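/*
 * Logging wrappers around m_free()/m_freem(): when SCTP_MBUF_LOGGING is
 * compiled in and mbuf logging is enabled, each freed mbuf is recorded
 * via sctp_log_mb() before being released.
 */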
6133 struct mbuf *
6134 sctp_m_free(struct mbuf *m)
6135 {
6136 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6137 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6138 	}
6139 	return (m_free(m));
6140 }
6141 
6142 void
6143 sctp_m_freem(struct mbuf *mb)
6144 {
6145 	while (mb != NULL)
6146 		mb = sctp_m_free(mb);
6147 }
6148 
6149 #endif
6150 
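/*
 * Queue a work item for the address iterator: the entry is placed on the
 * address work queue and the SCTP_TIMER_TYPE_ADDR_WQ timer is started so
 * the peer-set-primary request is processed asynchronously.
 */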
6151 int
6152 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6153 {
	/*
	 * Given a local address, request a peer-set-primary for all
	 * associations that hold the address.
	 */
6158 	struct sctp_ifa *ifa;
6159 	struct sctp_laddr *wi;
6160 
6161 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6162 	if (ifa == NULL) {
6163 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6164 		return (EADDRNOTAVAIL);
6165 	}
6166 	/*
6167 	 * Now that we have the ifa we must awaken the iterator with this
6168 	 * message.
6169 	 */
6170 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6171 	if (wi == NULL) {
6172 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6173 		return (ENOMEM);
6174 	}
	/* Now increment the count and initialize the wi structure */
6176 	SCTP_INCR_LADDR_COUNT();
6177 	bzero(wi, sizeof(*wi));
6178 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6179 	wi->ifa = ifa;
6180 	wi->action = SCTP_SET_PRIM_ADDR;
6181 	atomic_add_int(&ifa->refcount, 1);
6182 
6183 	/* Now add it to the work queue */
6184 	SCTP_WQ_ADDR_LOCK();
6185 	/*
6186 	 * Should this really be a tailq? As it is we will process the
6187 	 * newest first :-0
6188 	 */
6189 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6190 	SCTP_WQ_ADDR_UNLOCK();
6191 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6192 	    (struct sctp_inpcb *)NULL,
6193 	    (struct sctp_tcb *)NULL,
6194 	    (struct sctp_nets *)NULL);
6195 	return (0);
6196 }
6197 
6198 
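/*
 * Socket-layer receive entry point for SCTP sockets; wraps
 * sctp_sorecvmsg(). If requested, sndrcv/rcvinfo ancillary data is
 * gathered in a local sctp_extrcvinfo and converted into a control mbuf
 * via sctp_build_ctl_nchunk(), and the peer address is duplicated for
 * the caller with sodupsockaddr().
 */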
6199 int
6200 sctp_soreceive(struct socket *so,
6201     struct sockaddr **psa,
6202     struct uio *uio,
6203     struct mbuf **mp0,
6204     struct mbuf **controlp,
6205     int *flagsp)
6206 {
6207 	int error, fromlen;
6208 	uint8_t sockbuf[256];
6209 	struct sockaddr *from;
6210 	struct sctp_extrcvinfo sinfo;
6211 	int filling_sinfo = 1;
6212 	struct sctp_inpcb *inp;
6213 
6214 	inp = (struct sctp_inpcb *)so->so_pcb;
	/* pick up the assoc we are reading from */
6216 	if (inp == NULL) {
6217 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6218 		return (EINVAL);
6219 	}
6220 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6221 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6222 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6223 	    (controlp == NULL)) {
6224 		/* user does not want the sndrcv ctl */
6225 		filling_sinfo = 0;
6226 	}
6227 	if (psa) {
6228 		from = (struct sockaddr *)sockbuf;
6229 		fromlen = sizeof(sockbuf);
6230 		from->sa_len = 0;
6231 	} else {
6232 		from = NULL;
6233 		fromlen = 0;
6234 	}
6235 
6236 	if (filling_sinfo) {
6237 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6238 	}
6239 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6240 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6241 	if (controlp != NULL) {
6242 		/* copy back the sinfo in a CMSG format */
6243 		if (filling_sinfo)
6244 			*controlp = sctp_build_ctl_nchunk(inp,
6245 			    (struct sctp_sndrcvinfo *)&sinfo);
6246 		else
6247 			*controlp = NULL;
6248 	}
6249 	if (psa) {
6250 		/* copy back the address info */
6251 		if (from && from->sa_len) {
6252 			*psa = sodupsockaddr(from, M_NOWAIT);
6253 		} else {
6254 			*psa = NULL;
6255 		}
6256 	}
6257 	return (error);
6258 }
6259 
6260 
6261 
6262 
6263 
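/*
 * Add the addresses from a connectx()-style packed list to an existing
 * stcb. Each address is sanity checked (no wildcard, broadcast, or
 * multicast destinations) and then added as a confirmed remote address;
 * on failure the association is freed and *error is set. Returns the
 * number of addresses added.
 */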
6264 int
6265 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6266     int totaddr, int *error)
6267 {
6268 	int added = 0;
6269 	int i;
6270 	struct sctp_inpcb *inp;
6271 	struct sockaddr *sa;
6272 	size_t incr = 0;
6273 
6274 #ifdef INET
6275 	struct sockaddr_in *sin;
6276 
6277 #endif
6278 #ifdef INET6
6279 	struct sockaddr_in6 *sin6;
6280 
6281 #endif
6282 
6283 	sa = addr;
6284 	inp = stcb->sctp_ep;
6285 	*error = 0;
6286 	for (i = 0; i < totaddr; i++) {
6287 		switch (sa->sa_family) {
6288 #ifdef INET
6289 		case AF_INET:
6290 			incr = sizeof(struct sockaddr_in);
6291 			sin = (struct sockaddr_in *)sa;
6292 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6293 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6294 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6295 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6296 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6297 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6298 				*error = EINVAL;
6299 				goto out_now;
6300 			}
6301 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6302 			    SCTP_DONOT_SETSCOPE,
6303 			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc is gone, no unlock needed */
6305 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6306 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6307 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6308 				*error = ENOBUFS;
6309 				goto out_now;
6310 			}
6311 			added++;
6312 			break;
6313 #endif
6314 #ifdef INET6
6315 		case AF_INET6:
6316 			incr = sizeof(struct sockaddr_in6);
6317 			sin6 = (struct sockaddr_in6 *)sa;
6318 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6319 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6320 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6321 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6322 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6323 				*error = EINVAL;
6324 				goto out_now;
6325 			}
6326 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6327 			    SCTP_DONOT_SETSCOPE,
6328 			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc is gone, no unlock needed */
6330 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6331 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6332 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6333 				*error = ENOBUFS;
6334 				goto out_now;
6335 			}
6336 			added++;
6337 			break;
6338 #endif
6339 		default:
6340 			break;
6341 		}
6342 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6343 	}
6344 out_now:
6345 	return (added);
6346 }
6347 
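/*
 * Walk a packed connectx() address list: count the IPv4/IPv6 entries,
 * validate the sockaddr lengths (rejecting v4-mapped IPv6 addresses),
 * and return any existing association already set up for one of the
 * addresses. A NULL return with *error == 0 means no association was
 * found.
 */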
6348 struct sctp_tcb *
6349 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6350     unsigned int *totaddr,
6351     unsigned int *num_v4, unsigned int *num_v6, int *error,
6352     unsigned int limit, int *bad_addr)
6353 {
6354 	struct sockaddr *sa;
6355 	struct sctp_tcb *stcb = NULL;
6356 	unsigned int incr, at, i;
6357 
6358 	at = 0;
6359 	sa = addr;
6360 	*error = *num_v6 = *num_v4 = 0;
6361 	/* account and validate addresses */
6362 	for (i = 0; i < *totaddr; i++) {
6363 		switch (sa->sa_family) {
6364 #ifdef INET
6365 		case AF_INET:
6366 			incr = (unsigned int)sizeof(struct sockaddr_in);
6367 			if (sa->sa_len != incr) {
6368 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6369 				*error = EINVAL;
6370 				*bad_addr = 1;
6371 				return (NULL);
6372 			}
6373 			(*num_v4) += 1;
6374 			break;
6375 #endif
6376 #ifdef INET6
6377 		case AF_INET6:
6378 			{
6379 				struct sockaddr_in6 *sin6;
6380 
6381 				sin6 = (struct sockaddr_in6 *)sa;
6382 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6383 					/* Must be non-mapped for connectx */
6384 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6385 					*error = EINVAL;
6386 					*bad_addr = 1;
6387 					return (NULL);
6388 				}
6389 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6390 				if (sa->sa_len != incr) {
6391 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6392 					*error = EINVAL;
6393 					*bad_addr = 1;
6394 					return (NULL);
6395 				}
6396 				(*num_v6) += 1;
6397 				break;
6398 			}
6399 #endif
6400 		default:
6401 			*totaddr = i;
6402 			incr = 0;
6403 			/* we are done */
6404 			break;
6405 		}
6406 		if (i == *totaddr) {
6407 			break;
6408 		}
6409 		SCTP_INP_INCR_REF(inp);
6410 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6411 		if (stcb != NULL) {
			/* Already have, or am bringing up, an association */
6413 			return (stcb);
6414 		} else {
6415 			SCTP_INP_DECR_REF(inp);
6416 		}
6417 		if ((at + incr) > limit) {
6418 			*totaddr = i;
6419 			break;
6420 		}
6421 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6422 	}
6423 	return ((struct sctp_tcb *)NULL);
6424 }
6425 
6426 /*
6427  * sctp_bindx(ADD) for one address.
6428  * assumes all arguments are valid/checked by caller.
6429  */
6430 void
6431 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6432     struct sockaddr *sa, sctp_assoc_t assoc_id,
6433     uint32_t vrf_id, int *error, void *p)
6434 {
6435 	struct sockaddr *addr_touse;
6436 
6437 #if defined(INET) && defined(INET6)
6438 	struct sockaddr_in sin;
6439 
6440 #endif
6441 
6442 	/* see if we're bound all already! */
6443 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6444 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6445 		*error = EINVAL;
6446 		return;
6447 	}
6448 	addr_touse = sa;
6449 #ifdef INET6
6450 	if (sa->sa_family == AF_INET6) {
6451 #ifdef INET
6452 		struct sockaddr_in6 *sin6;
6453 
6454 #endif
6455 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6456 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6457 			*error = EINVAL;
6458 			return;
6459 		}
6460 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6461 			/* can only bind v6 on PF_INET6 sockets */
6462 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6463 			*error = EINVAL;
6464 			return;
6465 		}
6466 #ifdef INET
6467 		sin6 = (struct sockaddr_in6 *)addr_touse;
6468 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6469 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6470 			    SCTP_IPV6_V6ONLY(inp)) {
6471 				/* can't bind v4-mapped on PF_INET sockets */
6472 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6473 				*error = EINVAL;
6474 				return;
6475 			}
6476 			in6_sin6_2_sin(&sin, sin6);
6477 			addr_touse = (struct sockaddr *)&sin;
6478 		}
6479 #endif
6480 	}
6481 #endif
6482 #ifdef INET
6483 	if (sa->sa_family == AF_INET) {
6484 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6485 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6486 			*error = EINVAL;
6487 			return;
6488 		}
6489 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6490 		    SCTP_IPV6_V6ONLY(inp)) {
6491 			/* can't bind v4 on PF_INET sockets */
6492 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6493 			*error = EINVAL;
6494 			return;
6495 		}
6496 	}
6497 #endif
6498 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6499 		if (p == NULL) {
6500 			/* Can't get proc for Net/Open BSD */
6501 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6502 			*error = EINVAL;
6503 			return;
6504 		}
6505 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6506 		return;
6507 	}
6508 	/*
6509 	 * No locks required here since bind and mgmt_ep_sa all do their own
6510 	 * locking. If we do something for the FIX: below we may need to
6511 	 * lock in that case.
6512 	 */
6513 	if (assoc_id == 0) {
6514 		/* add the address */
6515 		struct sctp_inpcb *lep;
6516 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6517 
6518 		/* validate the incoming port */
6519 		if ((lsin->sin_port != 0) &&
6520 		    (lsin->sin_port != inp->sctp_lport)) {
6521 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6522 			*error = EINVAL;
6523 			return;
6524 		} else {
6525 			/* user specified 0 port, set it to existing port */
6526 			lsin->sin_port = inp->sctp_lport;
6527 		}
6528 
6529 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6530 		if (lep != NULL) {
6531 			/*
6532 			 * We must decrement the refcount since we have the
6533 			 * ep already and are binding. No remove going on
6534 			 * here.
6535 			 */
6536 			SCTP_INP_DECR_REF(lep);
6537 		}
6538 		if (lep == inp) {
6539 			/* already bound to it.. ok */
6540 			return;
6541 		} else if (lep == NULL) {
6542 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6543 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6544 			    SCTP_ADD_IP_ADDRESS,
6545 			    vrf_id, NULL);
6546 		} else {
6547 			*error = EADDRINUSE;
6548 		}
6549 		if (*error)
6550 			return;
6551 	} else {
6552 		/*
6553 		 * FIX: decide whether we allow assoc based bindx
6554 		 */
6555 	}
6556 }
6557 
6558 /*
6559  * sctp_bindx(DELETE) for one address.
6560  * assumes all arguments are valid/checked by caller.
6561  */
6562 void
6563 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6564     struct sockaddr *sa, sctp_assoc_t assoc_id,
6565     uint32_t vrf_id, int *error)
6566 {
6567 	struct sockaddr *addr_touse;
6568 
6569 #if defined(INET) && defined(INET6)
6570 	struct sockaddr_in sin;
6571 
6572 #endif
6573 
6574 	/* see if we're bound all already! */
6575 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6576 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6577 		*error = EINVAL;
6578 		return;
6579 	}
6580 	addr_touse = sa;
6581 #ifdef INET6
6582 	if (sa->sa_family == AF_INET6) {
6583 #ifdef INET
6584 		struct sockaddr_in6 *sin6;
6585 
6586 #endif
6587 
6588 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6589 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6590 			*error = EINVAL;
6591 			return;
6592 		}
6593 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6594 			/* can only bind v6 on PF_INET6 sockets */
6595 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6596 			*error = EINVAL;
6597 			return;
6598 		}
6599 #ifdef INET
6600 		sin6 = (struct sockaddr_in6 *)addr_touse;
6601 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6602 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6603 			    SCTP_IPV6_V6ONLY(inp)) {
6604 				/* can't bind mapped-v4 on PF_INET sockets */
6605 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6606 				*error = EINVAL;
6607 				return;
6608 			}
6609 			in6_sin6_2_sin(&sin, sin6);
6610 			addr_touse = (struct sockaddr *)&sin;
6611 		}
6612 #endif
6613 	}
6614 #endif
6615 #ifdef INET
6616 	if (sa->sa_family == AF_INET) {
6617 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6618 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6619 			*error = EINVAL;
6620 			return;
6621 		}
6622 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6623 		    SCTP_IPV6_V6ONLY(inp)) {
6624 			/* can't bind v4 on PF_INET sockets */
6625 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6626 			*error = EINVAL;
6627 			return;
6628 		}
6629 	}
6630 #endif
6631 	/*
6632 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6633 	 * below is ever changed we may need to lock before calling
6634 	 * association level binding.
6635 	 */
6636 	if (assoc_id == 0) {
6637 		/* delete the address */
6638 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6639 		    SCTP_DEL_IP_ADDRESS,
6640 		    vrf_id, NULL);
6641 	} else {
6642 		/*
6643 		 * FIX: decide whether we allow assoc based bindx
6644 		 */
6645 	}
6646 }
6647 
6648 /*
6649  * returns the valid local address count for an assoc, taking into account
6650  * all scoping rules
6651  */
6652 int
6653 sctp_local_addr_count(struct sctp_tcb *stcb)
6654 {
6655 	int loopback_scope;
6656 
6657 #if defined(INET)
6658 	int ipv4_local_scope, ipv4_addr_legal;
6659 
6660 #endif
6661 #if defined (INET6)
6662 	int local_scope, site_scope, ipv6_addr_legal;
6663 
6664 #endif
6665 	struct sctp_vrf *vrf;
6666 	struct sctp_ifn *sctp_ifn;
6667 	struct sctp_ifa *sctp_ifa;
6668 	int count = 0;
6669 
6670 	/* Turn on all the appropriate scopes */
6671 	loopback_scope = stcb->asoc.scope.loopback_scope;
6672 #if defined(INET)
6673 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6674 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6675 #endif
6676 #if defined(INET6)
6677 	local_scope = stcb->asoc.scope.local_scope;
6678 	site_scope = stcb->asoc.scope.site_scope;
6679 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6680 #endif
6681 	SCTP_IPI_ADDR_RLOCK();
6682 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6683 	if (vrf == NULL) {
6684 		/* no vrf, no addresses */
6685 		SCTP_IPI_ADDR_RUNLOCK();
6686 		return (0);
6687 	}
6688 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6689 		/*
6690 		 * bound all case: go through all ifns on the vrf
6691 		 */
6692 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6693 			if ((loopback_scope == 0) &&
6694 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6695 				continue;
6696 			}
6697 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6698 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6699 					continue;
6700 				switch (sctp_ifa->address.sa.sa_family) {
6701 #ifdef INET
6702 				case AF_INET:
6703 					if (ipv4_addr_legal) {
6704 						struct sockaddr_in *sin;
6705 
6706 						sin = &sctp_ifa->address.sin;
6707 						if (sin->sin_addr.s_addr == 0) {
6708 							/*
6709 							 * skip unspecified
6710 							 * addrs
6711 							 */
6712 							continue;
6713 						}
6714 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6715 						    &sin->sin_addr) != 0) {
6716 							continue;
6717 						}
6718 						if ((ipv4_local_scope == 0) &&
6719 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6720 							continue;
6721 						}
6722 						/* count this one */
6723 						count++;
6724 					} else {
6725 						continue;
6726 					}
6727 					break;
6728 #endif
6729 #ifdef INET6
6730 				case AF_INET6:
6731 					if (ipv6_addr_legal) {
6732 						struct sockaddr_in6 *sin6;
6733 
6734 						sin6 = &sctp_ifa->address.sin6;
6735 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6736 							continue;
6737 						}
6738 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6739 						    &sin6->sin6_addr) != 0) {
6740 							continue;
6741 						}
6742 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6743 							if (local_scope == 0)
6744 								continue;
6745 							if (sin6->sin6_scope_id == 0) {
6746 								if (sa6_recoverscope(sin6) != 0)
									/* bad link-local address */
6761 									continue;
6762 							}
6763 						}
6764 						if ((site_scope == 0) &&
6765 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6766 							continue;
6767 						}
6768 						/* count this one */
6769 						count++;
6770 					}
6771 					break;
6772 #endif
6773 				default:
6774 					/* TSNH */
6775 					break;
6776 				}
6777 			}
6778 		}
6779 	} else {
6780 		/*
6781 		 * subset bound case
6782 		 */
6783 		struct sctp_laddr *laddr;
6784 
6785 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6786 		    sctp_nxt_addr) {
6787 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6788 				continue;
6789 			}
6790 			/* count this one */
6791 			count++;
6792 		}
6793 	}
6794 	SCTP_IPI_ADDR_RUNLOCK();
6795 	return (count);
6796 }
6797 
6798 #if defined(SCTP_LOCAL_TRACE_BUF)
6799 
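/*
 * Append one entry to the local SCTP trace ring. The index is advanced
 * with a lock-free atomic_cmpset_int() loop and wraps at
 * SCTP_MAX_LOGGING_SIZE, so concurrent callers each claim their own slot.
 */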
6800 void
6801 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6802 {
6803 	uint32_t saveindex, newindex;
6804 
6805 	do {
6806 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6807 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6808 			newindex = 1;
6809 		} else {
6810 			newindex = saveindex + 1;
6811 		}
6812 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6813 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6814 		saveindex = 0;
6815 	}
6816 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6817 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6818 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6819 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6820 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6821 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6822 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6823 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6824 }
6825 
6826 #endif
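
/*
 * Input handler for UDP-encapsulated SCTP packets (used as the UDP
 * tunneling input callback). It strips the UDP header out of the chain,
 * fixes up the IP/IPv6 payload length, and re-injects the packet into
 * the normal SCTP input path, remembering the peer's UDP source port.
 */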
6827 static void
6828 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6829     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6830 {
6831 	struct ip *iph;
6832 
6833 #ifdef INET6
6834 	struct ip6_hdr *ip6;
6835 
6836 #endif
6837 	struct mbuf *sp, *last;
6838 	struct udphdr *uhdr;
6839 	uint16_t port;
6840 
6841 	if ((m->m_flags & M_PKTHDR) == 0) {
6842 		/* Can't handle one that is not a pkt hdr */
6843 		goto out;
6844 	}
6845 	/* Pull the src port */
6846 	iph = mtod(m, struct ip *);
6847 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6848 	port = uhdr->uh_sport;
6849 	/*
6850 	 * Split out the mbuf chain. Leave the IP header in m, place the
6851 	 * rest in the sp.
6852 	 */
6853 	sp = m_split(m, off, M_NOWAIT);
6854 	if (sp == NULL) {
6855 		/* Gak, drop packet, we can't do a split */
6856 		goto out;
6857 	}
6858 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6859 		/* Gak, packet can't have an SCTP header in it - too small */
6860 		m_freem(sp);
6861 		goto out;
6862 	}
6863 	/* Now pull up the UDP header and SCTP header together */
6864 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6865 	if (sp == NULL) {
6866 		/* Gak pullup failed */
6867 		goto out;
6868 	}
6869 	/* Trim out the UDP header */
6870 	m_adj(sp, sizeof(struct udphdr));
6871 
6872 	/* Now reconstruct the mbuf chain */
6873 	for (last = m; last->m_next; last = last->m_next);
6874 	last->m_next = sp;
6875 	m->m_pkthdr.len += sp->m_pkthdr.len;
	/*
	 * The CSUM_DATA_VALID flag indicates that the HW checked the UDP
	 * checksum and it was valid. Since CSUM_DATA_VALID ==
	 * CSUM_SCTP_VALID this would imply that the HW also verified the
	 * SCTP checksum. Therefore, clear the bit.
	 */
6882 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6883 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6884 	    m->m_pkthdr.len,
6885 	    if_name(m->m_pkthdr.rcvif),
6886 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6887 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
6888 	iph = mtod(m, struct ip *);
6889 	switch (iph->ip_v) {
6890 #ifdef INET
6891 	case IPVERSION:
6892 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6893 		sctp_input_with_port(m, off, port);
6894 		break;
6895 #endif
6896 #ifdef INET6
6897 	case IPV6_VERSION >> 4:
6898 		ip6 = mtod(m, struct ip6_hdr *);
6899 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6900 		sctp6_input_with_port(&m, &off, port);
6901 		break;
6902 #endif
6903 	default:
		goto out;
6906 	}
6907 	return;
6908 out:
6909 	m_freem(m);
6910 }
6911 
6912 #ifdef INET
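/*
 * ICMP error handler for UDP-encapsulated SCTP. The embedded inner
 * IP/UDP/SCTP headers are used to look up the association; the UDP
 * ports and the verification tag (or, for a zero tag, the initiate tag
 * of an embedded INIT chunk) must match before sctp_notify() is called,
 * so reflected or spoofed ICMP messages are ignored.
 */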
6913 static void
6914 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
6915 {
6916 	struct ip *outer_ip, *inner_ip;
6917 	struct sctphdr *sh;
6918 	struct icmp *icmp;
6919 	struct udphdr *udp;
6920 	struct sctp_inpcb *inp;
6921 	struct sctp_tcb *stcb;
6922 	struct sctp_nets *net;
6923 	struct sctp_init_chunk *ch;
6924 	struct sockaddr_in src, dst;
6925 	uint8_t type, code;
6926 
6927 	inner_ip = (struct ip *)vip;
6928 	icmp = (struct icmp *)((caddr_t)inner_ip -
6929 	    (sizeof(struct icmp) - sizeof(struct ip)));
6930 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
6931 	if (ntohs(outer_ip->ip_len) <
6932 	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
6933 		return;
6934 	}
6935 	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
6936 	sh = (struct sctphdr *)(udp + 1);
6937 	memset(&src, 0, sizeof(struct sockaddr_in));
6938 	src.sin_family = AF_INET;
6939 	src.sin_len = sizeof(struct sockaddr_in);
6940 	src.sin_port = sh->src_port;
6941 	src.sin_addr = inner_ip->ip_src;
6942 	memset(&dst, 0, sizeof(struct sockaddr_in));
6943 	dst.sin_family = AF_INET;
6944 	dst.sin_len = sizeof(struct sockaddr_in);
6945 	dst.sin_port = sh->dest_port;
6946 	dst.sin_addr = inner_ip->ip_dst;
6947 	/*
6948 	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
6949 	 * holds our local endpoint address. Thus we reverse the dst and the
6950 	 * src in the lookup.
6951 	 */
6952 	inp = NULL;
6953 	net = NULL;
6954 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
6955 	    (struct sockaddr *)&src,
6956 	    &inp, &net, 1,
6957 	    SCTP_DEFAULT_VRFID);
6958 	if ((stcb != NULL) &&
6959 	    (net != NULL) &&
6960 	    (inp != NULL)) {
6961 		/* Check the UDP port numbers */
6962 		if ((udp->uh_dport != net->port) ||
6963 		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
6964 			SCTP_TCB_UNLOCK(stcb);
6965 			return;
6966 		}
6967 		/* Check the verification tag */
6968 		if (ntohl(sh->v_tag) != 0) {
6969 			/*
6970 			 * The quoted verification tag must be the one used
6971 			 * for sending out packets; packets that merely
6972 			 * reflect the verification tag are not considered.
6973 			 */
6974 			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
6975 				SCTP_TCB_UNLOCK(stcb);
6976 				return;
6977 			}
6978 		} else {
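			/*
			 * A zero verification tag can only be checked
			 * against a quoted INIT chunk, which requires the
			 * UDP header (8), the SCTP common header (12), the
			 * chunk header (4), and the initiate tag (4) to be
			 * present.
			 */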
6979 			if (ntohs(outer_ip->ip_len) >=
6980 			    sizeof(struct ip) +
6981 			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
6982 				/*
6983 				 * In this case we can check if we got an
6984 				 * INIT chunk and if the initiate tag
6985 				 * matches.
6986 				 */
6987 				ch = (struct sctp_init_chunk *)(sh + 1);
6988 				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
6989 				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
6990 					SCTP_TCB_UNLOCK(stcb);
6991 					return;
6992 				}
6993 			} else {
6994 				SCTP_TCB_UNLOCK(stcb);
6995 				return;
6996 			}
6997 		}
6998 		type = icmp->icmp_type;
6999 		code = icmp->icmp_code;
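		/*
		 * A port unreachable for the encapsulating UDP port means
		 * the peer has no SCTP-over-UDP listener; report it like a
		 * protocol unreachable for native SCTP.
		 */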
7000 		if ((type == ICMP_UNREACH) &&
7001 		    (code == ICMP_UNREACH_PORT)) {
7002 			code = ICMP_UNREACH_PROTOCOL;
7003 		}
7004 		sctp_notify(inp, stcb, net, type, code,
7005 		    ntohs(inner_ip->ip_len),
7006 		    ntohs(icmp->icmp_nextmtu));
7007 	} else {
7008 		if ((stcb == NULL) && (inp != NULL)) {
7009 			/* reduce ref-count */
7010 			SCTP_INP_WLOCK(inp);
7011 			SCTP_INP_DECR_REF(inp);
7012 			SCTP_INP_WUNLOCK(inp);
7013 		}
7014 		if (stcb) {
7015 			SCTP_TCB_UNLOCK(stcb);
7016 		}
7017 	}
7018 	return;
7019 }
7020 
7021 #endif
7022 
7023 #ifdef INET6
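/*
 * Process an ICMPv6 error reported for an SCTP packet that was sent
 * encapsulated in UDP. The quoted packet is copied out of the mbuf chain
 * and used to locate the association before the error is passed on via
 * sctp6_notify().
 */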
7024 static void
7025 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
7026 {
7027 	struct ip6ctlparam *ip6cp;
7028 	struct sctp_inpcb *inp;
7029 	struct sctp_tcb *stcb;
7030 	struct sctp_nets *net;
7031 	struct sctphdr sh;
7032 	struct udphdr udp;
7033 	struct sockaddr_in6 src, dst;
7034 	uint8_t type, code;
7035 
7036 	ip6cp = (struct ip6ctlparam *)d;
7037 	/*
7038 	 * XXX: We assume that when IPV6 is non-NULL, M and OFF are valid.
7039 	 */
7040 	if (ip6cp->ip6c_m == NULL) {
7041 		return;
7042 	}
7043 	/*
7044 	 * Check if we can safely examine the ports and the verification tag
7045 	 * of the SCTP common header.
7046 	 */
7047 	if (ip6cp->ip6c_m->m_pkthdr.len <
7048 	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
7049 		return;
7050 	}
7051 	/* Copy out the UDP header. */
7052 	memset(&udp, 0, sizeof(struct udphdr));
7053 	m_copydata(ip6cp->ip6c_m,
7054 	    ip6cp->ip6c_off,
7055 	    sizeof(struct udphdr),
7056 	    (caddr_t)&udp);
7057 	/* Copy out the port numbers and the verification tag. */
7058 	memset(&sh, 0, sizeof(struct sctphdr));
7059 	m_copydata(ip6cp->ip6c_m,
7060 	    ip6cp->ip6c_off + sizeof(struct udphdr),
7061 	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
7062 	    (caddr_t)&sh);
7063 	memset(&src, 0, sizeof(struct sockaddr_in6));
7064 	src.sin6_family = AF_INET6;
7065 	src.sin6_len = sizeof(struct sockaddr_in6);
7066 	src.sin6_port = sh.src_port;
7067 	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
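	/* Embed the scope zone ID for link-local addresses; give up if that fails. */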
7068 	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7069 		return;
7070 	}
7071 	memset(&dst, 0, sizeof(struct sockaddr_in6));
7072 	dst.sin6_family = AF_INET6;
7073 	dst.sin6_len = sizeof(struct sockaddr_in6);
7074 	dst.sin6_port = sh.dest_port;
7075 	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7076 	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7077 		return;
7078 	}
7079 	inp = NULL;
7080 	net = NULL;
7081 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7082 	    (struct sockaddr *)&src,
7083 	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7084 	if ((stcb != NULL) &&
7085 	    (net != NULL) &&
7086 	    (inp != NULL)) {
7087 		/* Check the UDP port numbers */
7088 		if ((udp.uh_dport != net->port) ||
7089 		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7090 			SCTP_TCB_UNLOCK(stcb);
7091 			return;
7092 		}
7093 		/* Check the verification tag */
7094 		if (ntohl(sh.v_tag) != 0) {
7095 			/*
7096 			 * The quoted verification tag must be the one used
7097 			 * for sending out packets; packets that merely
7098 			 * reflect the verification tag are not considered.
7099 			 */
7100 			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7101 				SCTP_TCB_UNLOCK(stcb);
7102 				return;
7103 			}
7104 		} else {
7105 			if (ip6cp->ip6c_m->m_pkthdr.len >=
7106 			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7107 			    sizeof(struct sctphdr) +
7108 			    sizeof(struct sctp_chunkhdr) +
7109 			    offsetof(struct sctp_init, a_rwnd)) {
7110 				/*
7111 				 * In this case we can check if we got an
7112 				 * INIT chunk and if the initiate tag
7113 				 * matches.
7114 				 */
7115 				uint32_t initiate_tag;
7116 				uint8_t chunk_type;
7117 
7118 				m_copydata(ip6cp->ip6c_m,
7119 				    ip6cp->ip6c_off +
7120 				    sizeof(struct udphdr) +
7121 				    sizeof(struct sctphdr),
7122 				    sizeof(uint8_t),
7123 				    (caddr_t)&chunk_type);
7124 				m_copydata(ip6cp->ip6c_m,
7125 				    ip6cp->ip6c_off +
7126 				    sizeof(struct udphdr) +
7127 				    sizeof(struct sctphdr) +
7128 				    sizeof(struct sctp_chunkhdr),
7129 				    sizeof(uint32_t),
7130 				    (caddr_t)&initiate_tag);
7131 				if ((chunk_type != SCTP_INITIATION) ||
7132 				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7133 					SCTP_TCB_UNLOCK(stcb);
7134 					return;
7135 				}
7136 			} else {
7137 				SCTP_TCB_UNLOCK(stcb);
7138 				return;
7139 			}
7140 		}
7141 		type = ip6cp->ip6c_icmp6->icmp6_type;
7142 		code = ip6cp->ip6c_icmp6->icmp6_code;
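		/*
		 * A 'port unreachable' for the encapsulating UDP port is
		 * reported as the 'unrecognized next header' parameter
		 * problem that a host without SCTP support would generate
		 * for native SCTP.
		 */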
7143 		if ((type == ICMP6_DST_UNREACH) &&
7144 		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7145 			type = ICMP6_PARAM_PROB;
7146 			code = ICMP6_PARAMPROB_NEXTHEADER;
7147 		}
7148 		sctp6_notify(inp, stcb, net, type, code,
7149 		    (uint16_t) ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7150 	} else {
7151 		if ((stcb == NULL) && (inp != NULL)) {
7152 			/* reduce inp's ref-count */
7153 			SCTP_INP_WLOCK(inp);
7154 			SCTP_INP_DECR_REF(inp);
7155 			SCTP_INP_WUNLOCK(inp);
7156 		}
7157 		if (stcb) {
7158 			SCTP_TCB_UNLOCK(stcb);
7159 		}
7160 	}
7161 }
7162 
7163 #endif
7164 
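/*
 * Close the UDP tunneling sockets, if they exist.
 */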
7165 void
7166 sctp_over_udp_stop(void)
7167 {
7168 	/*
7169 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
7170 	 * for writing!
7171 	 */
7172 #ifdef INET
7173 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7174 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7175 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7176 	}
7177 #endif
7178 #ifdef INET6
7179 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7180 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7181 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7182 	}
7183 #endif
7184 }
7185 
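/*
 * Create the UDP tunneling sockets, register the tunneling receive and
 * ICMP callbacks, and bind the sockets to the configured tunneling port.
 */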
7186 int
7187 sctp_over_udp_start(void)
7188 {
7189 	uint16_t port;
7190 	int ret;
7191 
7192 #ifdef INET
7193 	struct sockaddr_in sin;
7194 
7195 #endif
7196 #ifdef INET6
7197 	struct sockaddr_in6 sin6;
7198 
7199 #endif
7200 	/*
7201 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
7202 	 * for writing!
7203 	 */
7204 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7205 	if (ntohs(port) == 0) {
7206 		/* Must have a port set */
7207 		return (EINVAL);
7208 	}
7209 #ifdef INET
7210 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7211 		/* Already running -- must stop first */
7212 		return (EALREADY);
7213 	}
7214 #endif
7215 #ifdef INET6
7216 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7217 		/* Already running -- must stop first */
7218 		return (EALREADY);
7219 	}
7220 #endif
7221 #ifdef INET
7222 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7223 	    SOCK_DGRAM, IPPROTO_UDP,
7224 	    curthread->td_ucred, curthread))) {
7225 		sctp_over_udp_stop();
7226 		return (ret);
7227 	}
7228 	/* Register the tunneling receive and ICMP callbacks with UDP. */
7229 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7230 	    sctp_recv_udp_tunneled_packet,
7231 	    sctp_recv_icmp_tunneled_packet,
7232 	    NULL))) {
7233 		sctp_over_udp_stop();
7234 		return (ret);
7235 	}
7236 	/* OK, we have a socket; bind it to the tunneling port. */
7237 	memset(&sin, 0, sizeof(struct sockaddr_in));
7238 	sin.sin_len = sizeof(struct sockaddr_in);
7239 	sin.sin_family = AF_INET;
7240 	sin.sin_port = htons(port);
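	/*
	 * sin_addr stays INADDR_ANY from the memset, so the tunnel socket
	 * accepts packets arriving on any local address.
	 */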
7241 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7242 	    (struct sockaddr *)&sin, curthread))) {
7243 		sctp_over_udp_stop();
7244 		return (ret);
7245 	}
7246 #endif
7247 #ifdef INET6
7248 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7249 	    SOCK_DGRAM, IPPROTO_UDP,
7250 	    curthread->td_ucred, curthread))) {
7251 		sctp_over_udp_stop();
7252 		return (ret);
7253 	}
7254 	/* Register the tunneling receive and ICMPv6 callbacks with UDP. */
7255 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7256 	    sctp_recv_udp_tunneled_packet,
7257 	    sctp_recv_icmp6_tunneled_packet,
7258 	    NULL))) {
7259 		sctp_over_udp_stop();
7260 		return (ret);
7261 	}
7262 	/* OK, we have a socket; bind it to the tunneling port. */
7263 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7264 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7265 	sin6.sin6_family = AF_INET6;
7266 	sin6.sin6_port = htons(port);
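	/* Likewise, sin6_addr stays in6addr_any from the memset. */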
7267 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7268 	    (struct sockaddr *)&sin6, curthread))) {
7269 		sctp_over_udp_stop();
7270 		return (ret);
7271 	}
7272 #endif
7273 	return (0);
7274 }
7275