xref: /freebsd/sys/netinet/sctputil.c (revision cbd30a72ca196976c1c700400ecd424baa1b9c16)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55 #ifdef INET6
56 #include <netinet/icmp6.h>
57 #endif
58 
59 
60 #ifndef KTR_SCTP
61 #define KTR_SCTP KTR_SUBSYS
62 #endif
63 
64 extern const struct sctp_cc_functions sctp_cc_functions[];
65 extern const struct sctp_ss_functions sctp_ss_functions[];
66 
67 void
68 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
69 {
70 	struct sctp_cwnd_log sctp_clog;
71 
72 	sctp_clog.x.sb.stcb = stcb;
73 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
74 	if (stcb)
75 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
76 	else
77 		sctp_clog.x.sb.stcb_sbcc = 0;
78 	sctp_clog.x.sb.incr = incr;
79 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
80 	    SCTP_LOG_EVENT_SB,
81 	    from,
82 	    sctp_clog.x.misc.log1,
83 	    sctp_clog.x.misc.log2,
84 	    sctp_clog.x.misc.log3,
85 	    sctp_clog.x.misc.log4);
86 }
87 
88 void
89 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
90 {
91 	struct sctp_cwnd_log sctp_clog;
92 
93 	sctp_clog.x.close.inp = (void *)inp;
94 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
95 	if (stcb) {
96 		sctp_clog.x.close.stcb = (void *)stcb;
97 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
98 	} else {
99 		sctp_clog.x.close.stcb = 0;
100 		sctp_clog.x.close.state = 0;
101 	}
102 	sctp_clog.x.close.loc = loc;
103 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
104 	    SCTP_LOG_EVENT_CLOSE,
105 	    0,
106 	    sctp_clog.x.misc.log1,
107 	    sctp_clog.x.misc.log2,
108 	    sctp_clog.x.misc.log3,
109 	    sctp_clog.x.misc.log4);
110 }
111 
112 void
113 rto_logging(struct sctp_nets *net, int from)
114 {
115 	struct sctp_cwnd_log sctp_clog;
116 
117 	memset(&sctp_clog, 0, sizeof(sctp_clog));
118 	sctp_clog.x.rto.net = (void *)net;
119 	sctp_clog.x.rto.rtt = net->rtt / 1000;
120 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
121 	    SCTP_LOG_EVENT_RTT,
122 	    from,
123 	    sctp_clog.x.misc.log1,
124 	    sctp_clog.x.misc.log2,
125 	    sctp_clog.x.misc.log3,
126 	    sctp_clog.x.misc.log4);
127 }
128 
129 void
130 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
131 {
132 	struct sctp_cwnd_log sctp_clog;
133 
134 	sctp_clog.x.strlog.stcb = stcb;
135 	sctp_clog.x.strlog.n_tsn = tsn;
136 	sctp_clog.x.strlog.n_sseq = sseq;
137 	sctp_clog.x.strlog.e_tsn = 0;
138 	sctp_clog.x.strlog.e_sseq = 0;
139 	sctp_clog.x.strlog.strm = stream;
140 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
141 	    SCTP_LOG_EVENT_STRM,
142 	    from,
143 	    sctp_clog.x.misc.log1,
144 	    sctp_clog.x.misc.log2,
145 	    sctp_clog.x.misc.log3,
146 	    sctp_clog.x.misc.log4);
147 }
148 
149 void
150 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
151 {
152 	struct sctp_cwnd_log sctp_clog;
153 
154 	sctp_clog.x.nagle.stcb = (void *)stcb;
155 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
156 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
157 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
158 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
159 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
160 	    SCTP_LOG_EVENT_NAGLE,
161 	    action,
162 	    sctp_clog.x.misc.log1,
163 	    sctp_clog.x.misc.log2,
164 	    sctp_clog.x.misc.log3,
165 	    sctp_clog.x.misc.log4);
166 }
167 
168 void
169 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
170 {
171 	struct sctp_cwnd_log sctp_clog;
172 
173 	sctp_clog.x.sack.cumack = cumack;
174 	sctp_clog.x.sack.oldcumack = old_cumack;
175 	sctp_clog.x.sack.tsn = tsn;
176 	sctp_clog.x.sack.numGaps = gaps;
177 	sctp_clog.x.sack.numDups = dups;
178 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
179 	    SCTP_LOG_EVENT_SACK,
180 	    from,
181 	    sctp_clog.x.misc.log1,
182 	    sctp_clog.x.misc.log2,
183 	    sctp_clog.x.misc.log3,
184 	    sctp_clog.x.misc.log4);
185 }
186 
187 void
188 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
189 {
190 	struct sctp_cwnd_log sctp_clog;
191 
192 	memset(&sctp_clog, 0, sizeof(sctp_clog));
193 	sctp_clog.x.map.base = map;
194 	sctp_clog.x.map.cum = cum;
195 	sctp_clog.x.map.high = high;
196 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
197 	    SCTP_LOG_EVENT_MAP,
198 	    from,
199 	    sctp_clog.x.misc.log1,
200 	    sctp_clog.x.misc.log2,
201 	    sctp_clog.x.misc.log3,
202 	    sctp_clog.x.misc.log4);
203 }
204 
205 void
206 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
207 {
208 	struct sctp_cwnd_log sctp_clog;
209 
210 	memset(&sctp_clog, 0, sizeof(sctp_clog));
211 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
212 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
213 	sctp_clog.x.fr.tsn = tsn;
214 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
215 	    SCTP_LOG_EVENT_FR,
216 	    from,
217 	    sctp_clog.x.misc.log1,
218 	    sctp_clog.x.misc.log2,
219 	    sctp_clog.x.misc.log3,
220 	    sctp_clog.x.misc.log4);
221 }
222 
223 #ifdef SCTP_MBUF_LOGGING
224 void
225 sctp_log_mb(struct mbuf *m, int from)
226 {
227 	struct sctp_cwnd_log sctp_clog;
228 
229 	sctp_clog.x.mb.mp = m;
230 	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
231 	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
232 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
233 	if (SCTP_BUF_IS_EXTENDED(m)) {
234 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
235 		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
236 	} else {
237 		sctp_clog.x.mb.ext = 0;
238 		sctp_clog.x.mb.refcnt = 0;
239 	}
240 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
241 	    SCTP_LOG_EVENT_MBUF,
242 	    from,
243 	    sctp_clog.x.misc.log1,
244 	    sctp_clog.x.misc.log2,
245 	    sctp_clog.x.misc.log3,
246 	    sctp_clog.x.misc.log4);
247 }
248 
249 void
250 sctp_log_mbc(struct mbuf *m, int from)
251 {
252 	struct mbuf *mat;
253 
254 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
255 		sctp_log_mb(mat, from);
256 	}
257 }
258 #endif
259 
260 void
261 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
262 {
263 	struct sctp_cwnd_log sctp_clog;
264 
265 	if (control == NULL) {
266 		SCTP_PRINTF("Gak log of NULL?\n");
267 		return;
268 	}
269 	sctp_clog.x.strlog.stcb = control->stcb;
270 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
271 	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
272 	sctp_clog.x.strlog.strm = control->sinfo_stream;
273 	if (poschk != NULL) {
274 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
275 		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
276 	} else {
277 		sctp_clog.x.strlog.e_tsn = 0;
278 		sctp_clog.x.strlog.e_sseq = 0;
279 	}
280 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
281 	    SCTP_LOG_EVENT_STRM,
282 	    from,
283 	    sctp_clog.x.misc.log1,
284 	    sctp_clog.x.misc.log2,
285 	    sctp_clog.x.misc.log3,
286 	    sctp_clog.x.misc.log4);
287 }
288 
289 void
290 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
291 {
292 	struct sctp_cwnd_log sctp_clog;
293 
294 	sctp_clog.x.cwnd.net = net;
295 	if (stcb->asoc.send_queue_cnt > 255)
296 		sctp_clog.x.cwnd.cnt_in_send = 255;
297 	else
298 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
299 	if (stcb->asoc.stream_queue_cnt > 255)
300 		sctp_clog.x.cwnd.cnt_in_str = 255;
301 	else
302 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
303 
304 	if (net) {
305 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
306 		sctp_clog.x.cwnd.inflight = net->flight_size;
307 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
308 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
309 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
310 	}
311 	if (SCTP_CWNDLOG_PRESEND == from) {
312 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
313 	}
314 	sctp_clog.x.cwnd.cwnd_augment = augment;
315 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
316 	    SCTP_LOG_EVENT_CWND,
317 	    from,
318 	    sctp_clog.x.misc.log1,
319 	    sctp_clog.x.misc.log2,
320 	    sctp_clog.x.misc.log3,
321 	    sctp_clog.x.misc.log4);
322 }
323 
324 void
325 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
326 {
327 	struct sctp_cwnd_log sctp_clog;
328 
329 	memset(&sctp_clog, 0, sizeof(sctp_clog));
330 	if (inp) {
331 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
332 
333 	} else {
334 		sctp_clog.x.lock.sock = (void *)NULL;
335 	}
336 	sctp_clog.x.lock.inp = (void *)inp;
337 	if (stcb) {
338 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
339 	} else {
340 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
341 	}
342 	if (inp) {
343 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
344 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
345 	} else {
346 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
347 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
348 	}
349 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
350 	if (inp && (inp->sctp_socket)) {
351 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
352 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
353 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
354 	} else {
355 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
356 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
357 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
358 	}
359 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
360 	    SCTP_LOG_LOCK_EVENT,
361 	    from,
362 	    sctp_clog.x.misc.log1,
363 	    sctp_clog.x.misc.log2,
364 	    sctp_clog.x.misc.log3,
365 	    sctp_clog.x.misc.log4);
366 }
367 
368 void
369 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
370 {
371 	struct sctp_cwnd_log sctp_clog;
372 
373 	memset(&sctp_clog, 0, sizeof(sctp_clog));
374 	sctp_clog.x.cwnd.net = net;
375 	sctp_clog.x.cwnd.cwnd_new_value = error;
376 	sctp_clog.x.cwnd.inflight = net->flight_size;
377 	sctp_clog.x.cwnd.cwnd_augment = burst;
378 	if (stcb->asoc.send_queue_cnt > 255)
379 		sctp_clog.x.cwnd.cnt_in_send = 255;
380 	else
381 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
382 	if (stcb->asoc.stream_queue_cnt > 255)
383 		sctp_clog.x.cwnd.cnt_in_str = 255;
384 	else
385 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
386 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
387 	    SCTP_LOG_EVENT_MAXBURST,
388 	    from,
389 	    sctp_clog.x.misc.log1,
390 	    sctp_clog.x.misc.log2,
391 	    sctp_clog.x.misc.log3,
392 	    sctp_clog.x.misc.log4);
393 }
394 
395 void
396 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
397 {
398 	struct sctp_cwnd_log sctp_clog;
399 
400 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
401 	sctp_clog.x.rwnd.send_size = snd_size;
402 	sctp_clog.x.rwnd.overhead = overhead;
403 	sctp_clog.x.rwnd.new_rwnd = 0;
404 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
405 	    SCTP_LOG_EVENT_RWND,
406 	    from,
407 	    sctp_clog.x.misc.log1,
408 	    sctp_clog.x.misc.log2,
409 	    sctp_clog.x.misc.log3,
410 	    sctp_clog.x.misc.log4);
411 }
412 
413 void
414 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
415 {
416 	struct sctp_cwnd_log sctp_clog;
417 
418 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
419 	sctp_clog.x.rwnd.send_size = flight_size;
420 	sctp_clog.x.rwnd.overhead = overhead;
421 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
422 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
423 	    SCTP_LOG_EVENT_RWND,
424 	    from,
425 	    sctp_clog.x.misc.log1,
426 	    sctp_clog.x.misc.log2,
427 	    sctp_clog.x.misc.log3,
428 	    sctp_clog.x.misc.log4);
429 }
430 
431 #ifdef SCTP_MBCNT_LOGGING
432 static void
433 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
434 {
435 	struct sctp_cwnd_log sctp_clog;
436 
437 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
438 	sctp_clog.x.mbcnt.size_change = book;
439 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
440 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
441 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
442 	    SCTP_LOG_EVENT_MBCNT,
443 	    from,
444 	    sctp_clog.x.misc.log1,
445 	    sctp_clog.x.misc.log2,
446 	    sctp_clog.x.misc.log3,
447 	    sctp_clog.x.misc.log4);
448 }
449 #endif
450 
451 void
452 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
453 {
454 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
455 	    SCTP_LOG_MISC_EVENT,
456 	    from,
457 	    a, b, c, d);
458 }
459 
460 void
461 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
462 {
463 	struct sctp_cwnd_log sctp_clog;
464 
465 	sctp_clog.x.wake.stcb = (void *)stcb;
466 	sctp_clog.x.wake.wake_cnt = wake_cnt;
467 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
468 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
469 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
470 
471 	if (stcb->asoc.stream_queue_cnt < 0xff)
472 		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
473 	else
474 		sctp_clog.x.wake.stream_qcnt = 0xff;
475 
476 	if (stcb->asoc.chunks_on_out_queue < 0xff)
477 		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
478 	else
479 		sctp_clog.x.wake.chunks_on_oque = 0xff;
480 
481 	sctp_clog.x.wake.sctpflags = 0;
482 	/* set in the defered mode stuff */
483 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
484 		sctp_clog.x.wake.sctpflags |= 1;
485 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
486 		sctp_clog.x.wake.sctpflags |= 2;
487 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
488 		sctp_clog.x.wake.sctpflags |= 4;
489 	/* what about the sb */
490 	if (stcb->sctp_socket) {
491 		struct socket *so = stcb->sctp_socket;
492 
493 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
494 	} else {
495 		sctp_clog.x.wake.sbflags = 0xff;
496 	}
497 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
498 	    SCTP_LOG_EVENT_WAKE,
499 	    from,
500 	    sctp_clog.x.misc.log1,
501 	    sctp_clog.x.misc.log2,
502 	    sctp_clog.x.misc.log3,
503 	    sctp_clog.x.misc.log4);
504 }
505 
506 void
507 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
508 {
509 	struct sctp_cwnd_log sctp_clog;
510 
511 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
512 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
513 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
514 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
515 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
516 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
517 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
518 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
519 	    SCTP_LOG_EVENT_BLOCK,
520 	    from,
521 	    sctp_clog.x.misc.log1,
522 	    sctp_clog.x.misc.log2,
523 	    sctp_clog.x.misc.log3,
524 	    sctp_clog.x.misc.log4);
525 }
526 
527 int
528 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
529 {
530 	/* May need to fix this if ktrdump does not work */
531 	return (0);
532 }
533 
534 #ifdef SCTP_AUDITING_ENABLED
535 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
536 static int sctp_audit_indx = 0;
537 
538 static
539 void
540 sctp_print_audit_report(void)
541 {
542 	int i;
543 	int cnt;
544 
545 	cnt = 0;
546 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
547 		if ((sctp_audit_data[i][0] == 0xe0) &&
548 		    (sctp_audit_data[i][1] == 0x01)) {
549 			cnt = 0;
550 			SCTP_PRINTF("\n");
551 		} else if (sctp_audit_data[i][0] == 0xf0) {
552 			cnt = 0;
553 			SCTP_PRINTF("\n");
554 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
555 		    (sctp_audit_data[i][1] == 0x01)) {
556 			SCTP_PRINTF("\n");
557 			cnt = 0;
558 		}
559 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
560 		    (uint32_t)sctp_audit_data[i][1]);
561 		cnt++;
562 		if ((cnt % 14) == 0)
563 			SCTP_PRINTF("\n");
564 	}
565 	for (i = 0; i < sctp_audit_indx; i++) {
566 		if ((sctp_audit_data[i][0] == 0xe0) &&
567 		    (sctp_audit_data[i][1] == 0x01)) {
568 			cnt = 0;
569 			SCTP_PRINTF("\n");
570 		} else if (sctp_audit_data[i][0] == 0xf0) {
571 			cnt = 0;
572 			SCTP_PRINTF("\n");
573 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
574 		    (sctp_audit_data[i][1] == 0x01)) {
575 			SCTP_PRINTF("\n");
576 			cnt = 0;
577 		}
578 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
579 		    (uint32_t)sctp_audit_data[i][1]);
580 		cnt++;
581 		if ((cnt % 14) == 0)
582 			SCTP_PRINTF("\n");
583 	}
584 	SCTP_PRINTF("\n");
585 }
586 
587 void
588 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
589     struct sctp_nets *net)
590 {
591 	int resend_cnt, tot_out, rep, tot_book_cnt;
592 	struct sctp_nets *lnet;
593 	struct sctp_tmit_chunk *chk;
594 
595 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
596 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
597 	sctp_audit_indx++;
598 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
599 		sctp_audit_indx = 0;
600 	}
601 	if (inp == NULL) {
602 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
603 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
604 		sctp_audit_indx++;
605 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
606 			sctp_audit_indx = 0;
607 		}
608 		return;
609 	}
610 	if (stcb == NULL) {
611 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
612 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
613 		sctp_audit_indx++;
614 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
615 			sctp_audit_indx = 0;
616 		}
617 		return;
618 	}
619 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
620 	sctp_audit_data[sctp_audit_indx][1] =
621 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
622 	sctp_audit_indx++;
623 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
624 		sctp_audit_indx = 0;
625 	}
626 	rep = 0;
627 	tot_book_cnt = 0;
628 	resend_cnt = tot_out = 0;
629 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
630 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
631 			resend_cnt++;
632 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
633 			tot_out += chk->book_size;
634 			tot_book_cnt++;
635 		}
636 	}
637 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
638 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
639 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
640 		sctp_audit_indx++;
641 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
642 			sctp_audit_indx = 0;
643 		}
644 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
645 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
646 		rep = 1;
647 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
648 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
649 		sctp_audit_data[sctp_audit_indx][1] =
650 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
651 		sctp_audit_indx++;
652 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
653 			sctp_audit_indx = 0;
654 		}
655 	}
656 	if (tot_out != stcb->asoc.total_flight) {
657 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
658 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
659 		sctp_audit_indx++;
660 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
661 			sctp_audit_indx = 0;
662 		}
663 		rep = 1;
664 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
665 		    (int)stcb->asoc.total_flight);
666 		stcb->asoc.total_flight = tot_out;
667 	}
668 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
669 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
670 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
671 		sctp_audit_indx++;
672 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
673 			sctp_audit_indx = 0;
674 		}
675 		rep = 1;
676 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
677 
678 		stcb->asoc.total_flight_count = tot_book_cnt;
679 	}
680 	tot_out = 0;
681 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
682 		tot_out += lnet->flight_size;
683 	}
684 	if (tot_out != stcb->asoc.total_flight) {
685 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
686 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
687 		sctp_audit_indx++;
688 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
689 			sctp_audit_indx = 0;
690 		}
691 		rep = 1;
692 		SCTP_PRINTF("real flight:%d net total was %d\n",
693 		    stcb->asoc.total_flight, tot_out);
694 		/* now corrective action */
695 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
696 
697 			tot_out = 0;
698 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
699 				if ((chk->whoTo == lnet) &&
700 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
701 					tot_out += chk->book_size;
702 				}
703 			}
704 			if (lnet->flight_size != tot_out) {
705 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
706 				    (void *)lnet, lnet->flight_size,
707 				    tot_out);
708 				lnet->flight_size = tot_out;
709 			}
710 		}
711 	}
712 	if (rep) {
713 		sctp_print_audit_report();
714 	}
715 }
716 
717 void
718 sctp_audit_log(uint8_t ev, uint8_t fd)
719 {
720 
721 	sctp_audit_data[sctp_audit_indx][0] = ev;
722 	sctp_audit_data[sctp_audit_indx][1] = fd;
723 	sctp_audit_indx++;
724 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
725 		sctp_audit_indx = 0;
726 	}
727 }
728 
729 #endif
730 
731 /*
732  * sctp_stop_timers_for_shutdown() should be called
733  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
734  * state to make sure that all timers are stopped.
735  */
736 void
737 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
738 {
739 	struct sctp_association *asoc;
740 	struct sctp_nets *net;
741 
742 	asoc = &stcb->asoc;
743 
744 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
745 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
746 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
747 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
748 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
749 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
750 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
751 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
752 	}
753 }
754 
755 /*
756  * a list of sizes based on typical mtu's, used only if next hop size not
757  * returned.
758  */
759 static uint32_t sctp_mtu_sizes[] = {
760 	68,
761 	296,
762 	508,
763 	512,
764 	544,
765 	576,
766 	1006,
767 	1492,
768 	1500,
769 	1536,
770 	2002,
771 	2048,
772 	4352,
773 	4464,
774 	8166,
775 	17914,
776 	32000,
777 	65535
778 };
779 
780 /*
781  * Return the largest MTU smaller than val. If there is no
782  * entry, just return val.
783  */
784 uint32_t
785 sctp_get_prev_mtu(uint32_t val)
786 {
787 	uint32_t i;
788 
789 	if (val <= sctp_mtu_sizes[0]) {
790 		return (val);
791 	}
792 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
793 		if (val <= sctp_mtu_sizes[i]) {
794 			break;
795 		}
796 	}
797 	return (sctp_mtu_sizes[i - 1]);
798 }
799 
800 /*
801  * Return the smallest MTU larger than val. If there is no
802  * entry, just return val.
803  */
804 uint32_t
805 sctp_get_next_mtu(uint32_t val)
806 {
807 	/* select another MTU that is just bigger than this one */
808 	uint32_t i;
809 
810 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
811 		if (val < sctp_mtu_sizes[i]) {
812 			return (sctp_mtu_sizes[i]);
813 		}
814 	}
815 	return (val);
816 }
817 
818 void
819 sctp_fill_random_store(struct sctp_pcb *m)
820 {
821 	/*
822 	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
823 	 * our counter. The result becomes our good random numbers and we
824 	 * then setup to give these out. Note that we do no locking to
825 	 * protect this. This is ok, since if competing folks call this we
826 	 * will get more gobbled gook in the random store which is what we
827 	 * want. There is a danger that two guys will use the same random
828 	 * numbers, but thats ok too since that is random as well :->
829 	 */
830 	m->store_at = 0;
831 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
832 	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
833 	    sizeof(m->random_counter), (uint8_t *)m->random_store);
834 	m->random_counter++;
835 }
836 
837 uint32_t
838 sctp_select_initial_TSN(struct sctp_pcb *inp)
839 {
840 	/*
841 	 * A true implementation should use random selection process to get
842 	 * the initial stream sequence number, using RFC1750 as a good
843 	 * guideline
844 	 */
845 	uint32_t x, *xp;
846 	uint8_t *p;
847 	int store_at, new_store;
848 
849 	if (inp->initial_sequence_debug != 0) {
850 		uint32_t ret;
851 
852 		ret = inp->initial_sequence_debug;
853 		inp->initial_sequence_debug++;
854 		return (ret);
855 	}
856 retry:
857 	store_at = inp->store_at;
858 	new_store = store_at + sizeof(uint32_t);
859 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
860 		new_store = 0;
861 	}
862 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
863 		goto retry;
864 	}
865 	if (new_store == 0) {
866 		/* Refill the random store */
867 		sctp_fill_random_store(inp);
868 	}
869 	p = &inp->random_store[store_at];
870 	xp = (uint32_t *)p;
871 	x = *xp;
872 	return (x);
873 }
874 
875 uint32_t
876 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
877 {
878 	uint32_t x;
879 	struct timeval now;
880 
881 	if (check) {
882 		(void)SCTP_GETTIME_TIMEVAL(&now);
883 	}
884 	for (;;) {
885 		x = sctp_select_initial_TSN(&inp->sctp_ep);
886 		if (x == 0) {
887 			/* we never use 0 */
888 			continue;
889 		}
890 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
891 			break;
892 		}
893 	}
894 	return (x);
895 }
896 
897 int32_t
898 sctp_map_assoc_state(int kernel_state)
899 {
900 	int32_t user_state;
901 
902 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
903 		user_state = SCTP_CLOSED;
904 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
905 		user_state = SCTP_SHUTDOWN_PENDING;
906 	} else {
907 		switch (kernel_state & SCTP_STATE_MASK) {
908 		case SCTP_STATE_EMPTY:
909 			user_state = SCTP_CLOSED;
910 			break;
911 		case SCTP_STATE_INUSE:
912 			user_state = SCTP_CLOSED;
913 			break;
914 		case SCTP_STATE_COOKIE_WAIT:
915 			user_state = SCTP_COOKIE_WAIT;
916 			break;
917 		case SCTP_STATE_COOKIE_ECHOED:
918 			user_state = SCTP_COOKIE_ECHOED;
919 			break;
920 		case SCTP_STATE_OPEN:
921 			user_state = SCTP_ESTABLISHED;
922 			break;
923 		case SCTP_STATE_SHUTDOWN_SENT:
924 			user_state = SCTP_SHUTDOWN_SENT;
925 			break;
926 		case SCTP_STATE_SHUTDOWN_RECEIVED:
927 			user_state = SCTP_SHUTDOWN_RECEIVED;
928 			break;
929 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
930 			user_state = SCTP_SHUTDOWN_ACK_SENT;
931 			break;
932 		default:
933 			user_state = SCTP_CLOSED;
934 			break;
935 		}
936 	}
937 	return (user_state);
938 }
939 
940 int
941 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
942     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
943 {
944 	struct sctp_association *asoc;
945 
946 	/*
947 	 * Anything set to zero is taken care of by the allocation routine's
948 	 * bzero
949 	 */
950 
951 	/*
952 	 * Up front select what scoping to apply on addresses I tell my peer
953 	 * Not sure what to do with these right now, we will need to come up
954 	 * with a way to set them. We may need to pass them through from the
955 	 * caller in the sctp_aloc_assoc() function.
956 	 */
957 	int i;
958 #if defined(SCTP_DETAILED_STR_STATS)
959 	int j;
960 #endif
961 
962 	asoc = &stcb->asoc;
963 	/* init all variables to a known value. */
964 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
965 	asoc->max_burst = inp->sctp_ep.max_burst;
966 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
967 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
968 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
969 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
970 	asoc->ecn_supported = inp->ecn_supported;
971 	asoc->prsctp_supported = inp->prsctp_supported;
972 	asoc->idata_supported = inp->idata_supported;
973 	asoc->auth_supported = inp->auth_supported;
974 	asoc->asconf_supported = inp->asconf_supported;
975 	asoc->reconfig_supported = inp->reconfig_supported;
976 	asoc->nrsack_supported = inp->nrsack_supported;
977 	asoc->pktdrop_supported = inp->pktdrop_supported;
978 	asoc->idata_supported = inp->idata_supported;
979 	asoc->sctp_cmt_pf = (uint8_t)0;
980 	asoc->sctp_frag_point = inp->sctp_frag_point;
981 	asoc->sctp_features = inp->sctp_features;
982 	asoc->default_dscp = inp->sctp_ep.default_dscp;
983 	asoc->max_cwnd = inp->max_cwnd;
984 #ifdef INET6
985 	if (inp->sctp_ep.default_flowlabel) {
986 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
987 	} else {
988 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
989 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
990 			asoc->default_flowlabel &= 0x000fffff;
991 			asoc->default_flowlabel |= 0x80000000;
992 		} else {
993 			asoc->default_flowlabel = 0;
994 		}
995 	}
996 #endif
997 	asoc->sb_send_resv = 0;
998 	if (override_tag) {
999 		asoc->my_vtag = override_tag;
1000 	} else {
1001 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1002 	}
1003 	/* Get the nonce tags */
1004 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1005 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1006 	asoc->vrf_id = vrf_id;
1007 
1008 #ifdef SCTP_ASOCLOG_OF_TSNS
1009 	asoc->tsn_in_at = 0;
1010 	asoc->tsn_out_at = 0;
1011 	asoc->tsn_in_wrapped = 0;
1012 	asoc->tsn_out_wrapped = 0;
1013 	asoc->cumack_log_at = 0;
1014 	asoc->cumack_log_atsnt = 0;
1015 #endif
1016 #ifdef SCTP_FS_SPEC_LOG
1017 	asoc->fs_index = 0;
1018 #endif
1019 	asoc->refcnt = 0;
1020 	asoc->assoc_up_sent = 0;
1021 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1022 	    sctp_select_initial_TSN(&inp->sctp_ep);
1023 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1024 	/* we are optimisitic here */
1025 	asoc->peer_supports_nat = 0;
1026 	asoc->sent_queue_retran_cnt = 0;
1027 
1028 	/* for CMT */
1029 	asoc->last_net_cmt_send_started = NULL;
1030 
1031 	/* This will need to be adjusted */
1032 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1033 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1034 	asoc->asconf_seq_in = asoc->last_acked_seq;
1035 
1036 	/* here we are different, we hold the next one we expect */
1037 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1038 
1039 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1040 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1041 
1042 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1043 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1044 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1045 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1046 	asoc->free_chunk_cnt = 0;
1047 
1048 	asoc->iam_blocking = 0;
1049 	asoc->context = inp->sctp_context;
1050 	asoc->local_strreset_support = inp->local_strreset_support;
1051 	asoc->def_send = inp->def_send;
1052 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1053 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1054 	asoc->pr_sctp_cnt = 0;
1055 	asoc->total_output_queue_size = 0;
1056 
1057 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1058 		asoc->scope.ipv6_addr_legal = 1;
1059 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1060 			asoc->scope.ipv4_addr_legal = 1;
1061 		} else {
1062 			asoc->scope.ipv4_addr_legal = 0;
1063 		}
1064 	} else {
1065 		asoc->scope.ipv6_addr_legal = 0;
1066 		asoc->scope.ipv4_addr_legal = 1;
1067 	}
1068 
1069 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1070 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1071 
1072 	asoc->smallest_mtu = inp->sctp_frag_point;
1073 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1074 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1075 
1076 	asoc->stream_locked_on = 0;
1077 	asoc->ecn_echo_cnt_onq = 0;
1078 	asoc->stream_locked = 0;
1079 
1080 	asoc->send_sack = 1;
1081 
1082 	LIST_INIT(&asoc->sctp_restricted_addrs);
1083 
1084 	TAILQ_INIT(&asoc->nets);
1085 	TAILQ_INIT(&asoc->pending_reply_queue);
1086 	TAILQ_INIT(&asoc->asconf_ack_sent);
1087 	/* Setup to fill the hb random cache at first HB */
1088 	asoc->hb_random_idx = 4;
1089 
1090 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1091 
1092 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1093 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1094 
1095 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1096 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1097 
1098 	/*
1099 	 * Now the stream parameters, here we allocate space for all streams
1100 	 * that we request by default.
1101 	 */
1102 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1103 	    o_strms;
1104 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1105 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1106 	    SCTP_M_STRMO);
1107 	if (asoc->strmout == NULL) {
1108 		/* big trouble no memory */
1109 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1110 		return (ENOMEM);
1111 	}
1112 	for (i = 0; i < asoc->streamoutcnt; i++) {
1113 		/*
1114 		 * inbound side must be set to 0xffff, also NOTE when we get
1115 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1116 		 * count (streamoutcnt) but first check if we sent to any of
1117 		 * the upper streams that were dropped (if some were). Those
1118 		 * that were dropped must be notified to the upper layer as
1119 		 * failed to send.
1120 		 */
1121 		asoc->strmout[i].next_mid_ordered = 0;
1122 		asoc->strmout[i].next_mid_unordered = 0;
1123 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1124 		asoc->strmout[i].chunks_on_queues = 0;
1125 #if defined(SCTP_DETAILED_STR_STATS)
1126 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1127 			asoc->strmout[i].abandoned_sent[j] = 0;
1128 			asoc->strmout[i].abandoned_unsent[j] = 0;
1129 		}
1130 #else
1131 		asoc->strmout[i].abandoned_sent[0] = 0;
1132 		asoc->strmout[i].abandoned_unsent[0] = 0;
1133 #endif
1134 		asoc->strmout[i].sid = i;
1135 		asoc->strmout[i].last_msg_incomplete = 0;
1136 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1137 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1138 	}
1139 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1140 
1141 	/* Now the mapping array */
1142 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1143 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1144 	    SCTP_M_MAP);
1145 	if (asoc->mapping_array == NULL) {
1146 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1147 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1148 		return (ENOMEM);
1149 	}
1150 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1151 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1152 	    SCTP_M_MAP);
1153 	if (asoc->nr_mapping_array == NULL) {
1154 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1155 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1156 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1157 		return (ENOMEM);
1158 	}
1159 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1160 
1161 	/* Now the init of the other outqueues */
1162 	TAILQ_INIT(&asoc->free_chunks);
1163 	TAILQ_INIT(&asoc->control_send_queue);
1164 	TAILQ_INIT(&asoc->asconf_send_queue);
1165 	TAILQ_INIT(&asoc->send_queue);
1166 	TAILQ_INIT(&asoc->sent_queue);
1167 	TAILQ_INIT(&asoc->resetHead);
1168 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1169 	TAILQ_INIT(&asoc->asconf_queue);
1170 	/* authentication fields */
1171 	asoc->authinfo.random = NULL;
1172 	asoc->authinfo.active_keyid = 0;
1173 	asoc->authinfo.assoc_key = NULL;
1174 	asoc->authinfo.assoc_keyid = 0;
1175 	asoc->authinfo.recv_key = NULL;
1176 	asoc->authinfo.recv_keyid = 0;
1177 	LIST_INIT(&asoc->shared_keys);
1178 	asoc->marked_retrans = 0;
1179 	asoc->port = inp->sctp_ep.port;
1180 	asoc->timoinit = 0;
1181 	asoc->timodata = 0;
1182 	asoc->timosack = 0;
1183 	asoc->timoshutdown = 0;
1184 	asoc->timoheartbeat = 0;
1185 	asoc->timocookie = 0;
1186 	asoc->timoshutdownack = 0;
1187 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1188 	asoc->discontinuity_time = asoc->start_time;
1189 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1190 		asoc->abandoned_unsent[i] = 0;
1191 		asoc->abandoned_sent[i] = 0;
1192 	}
1193 	/*
1194 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1195 	 * freed later when the association is freed.
1196 	 */
1197 	return (0);
1198 }
1199 
1200 void
1201 sctp_print_mapping_array(struct sctp_association *asoc)
1202 {
1203 	unsigned int i, limit;
1204 
1205 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1206 	    asoc->mapping_array_size,
1207 	    asoc->mapping_array_base_tsn,
1208 	    asoc->cumulative_tsn,
1209 	    asoc->highest_tsn_inside_map,
1210 	    asoc->highest_tsn_inside_nr_map);
1211 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1212 		if (asoc->mapping_array[limit - 1] != 0) {
1213 			break;
1214 		}
1215 	}
1216 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1217 	for (i = 0; i < limit; i++) {
1218 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1219 	}
1220 	if (limit % 16)
1221 		SCTP_PRINTF("\n");
1222 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1223 		if (asoc->nr_mapping_array[limit - 1]) {
1224 			break;
1225 		}
1226 	}
1227 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1228 	for (i = 0; i < limit; i++) {
1229 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1230 	}
1231 	if (limit % 16)
1232 		SCTP_PRINTF("\n");
1233 }
1234 
1235 int
1236 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1237 {
1238 	/* mapping array needs to grow */
1239 	uint8_t *new_array1, *new_array2;
1240 	uint32_t new_size;
1241 
1242 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1243 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1244 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1245 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1246 		/* can't get more, forget it */
1247 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1248 		if (new_array1) {
1249 			SCTP_FREE(new_array1, SCTP_M_MAP);
1250 		}
1251 		if (new_array2) {
1252 			SCTP_FREE(new_array2, SCTP_M_MAP);
1253 		}
1254 		return (-1);
1255 	}
1256 	memset(new_array1, 0, new_size);
1257 	memset(new_array2, 0, new_size);
1258 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1259 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1260 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1261 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1262 	asoc->mapping_array = new_array1;
1263 	asoc->nr_mapping_array = new_array2;
1264 	asoc->mapping_array_size = new_size;
1265 	return (0);
1266 }
1267 
1268 
1269 static void
1270 sctp_iterator_work(struct sctp_iterator *it)
1271 {
1272 	int iteration_count = 0;
1273 	int inp_skip = 0;
1274 	int first_in = 1;
1275 	struct sctp_inpcb *tinp;
1276 
1277 	SCTP_INP_INFO_RLOCK();
1278 	SCTP_ITERATOR_LOCK();
1279 	sctp_it_ctl.cur_it = it;
1280 	if (it->inp) {
1281 		SCTP_INP_RLOCK(it->inp);
1282 		SCTP_INP_DECR_REF(it->inp);
1283 	}
1284 	if (it->inp == NULL) {
1285 		/* iterator is complete */
1286 done_with_iterator:
1287 		sctp_it_ctl.cur_it = NULL;
1288 		SCTP_ITERATOR_UNLOCK();
1289 		SCTP_INP_INFO_RUNLOCK();
1290 		if (it->function_atend != NULL) {
1291 			(*it->function_atend) (it->pointer, it->val);
1292 		}
1293 		SCTP_FREE(it, SCTP_M_ITER);
1294 		return;
1295 	}
1296 select_a_new_ep:
1297 	if (first_in) {
1298 		first_in = 0;
1299 	} else {
1300 		SCTP_INP_RLOCK(it->inp);
1301 	}
1302 	while (((it->pcb_flags) &&
1303 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1304 	    ((it->pcb_features) &&
1305 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1306 		/* endpoint flags or features don't match, so keep looking */
1307 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1308 			SCTP_INP_RUNLOCK(it->inp);
1309 			goto done_with_iterator;
1310 		}
1311 		tinp = it->inp;
1312 		it->inp = LIST_NEXT(it->inp, sctp_list);
1313 		SCTP_INP_RUNLOCK(tinp);
1314 		if (it->inp == NULL) {
1315 			goto done_with_iterator;
1316 		}
1317 		SCTP_INP_RLOCK(it->inp);
1318 	}
1319 	/* now go through each assoc which is in the desired state */
1320 	if (it->done_current_ep == 0) {
1321 		if (it->function_inp != NULL)
1322 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1323 		it->done_current_ep = 1;
1324 	}
1325 	if (it->stcb == NULL) {
1326 		/* run the per instance function */
1327 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1328 	}
1329 	if ((inp_skip) || it->stcb == NULL) {
1330 		if (it->function_inp_end != NULL) {
1331 			inp_skip = (*it->function_inp_end) (it->inp,
1332 			    it->pointer,
1333 			    it->val);
1334 		}
1335 		SCTP_INP_RUNLOCK(it->inp);
1336 		goto no_stcb;
1337 	}
1338 	while (it->stcb) {
1339 		SCTP_TCB_LOCK(it->stcb);
1340 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1341 			/* not in the right state... keep looking */
1342 			SCTP_TCB_UNLOCK(it->stcb);
1343 			goto next_assoc;
1344 		}
1345 		/* see if we have limited out the iterator loop */
1346 		iteration_count++;
1347 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1348 			/* Pause to let others grab the lock */
1349 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1350 			SCTP_TCB_UNLOCK(it->stcb);
1351 			SCTP_INP_INCR_REF(it->inp);
1352 			SCTP_INP_RUNLOCK(it->inp);
1353 			SCTP_ITERATOR_UNLOCK();
1354 			SCTP_INP_INFO_RUNLOCK();
1355 			SCTP_INP_INFO_RLOCK();
1356 			SCTP_ITERATOR_LOCK();
1357 			if (sctp_it_ctl.iterator_flags) {
1358 				/* We won't be staying here */
1359 				SCTP_INP_DECR_REF(it->inp);
1360 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1361 				if (sctp_it_ctl.iterator_flags &
1362 				    SCTP_ITERATOR_STOP_CUR_IT) {
1363 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1364 					goto done_with_iterator;
1365 				}
1366 				if (sctp_it_ctl.iterator_flags &
1367 				    SCTP_ITERATOR_STOP_CUR_INP) {
1368 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1369 					goto no_stcb;
1370 				}
1371 				/* If we reach here huh? */
1372 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1373 				    sctp_it_ctl.iterator_flags);
1374 				sctp_it_ctl.iterator_flags = 0;
1375 			}
1376 			SCTP_INP_RLOCK(it->inp);
1377 			SCTP_INP_DECR_REF(it->inp);
1378 			SCTP_TCB_LOCK(it->stcb);
1379 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1380 			iteration_count = 0;
1381 		}
1382 		/* run function on this one */
1383 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1384 
1385 		/*
1386 		 * we lie here, it really needs to have its own type but
1387 		 * first I must verify that this won't effect things :-0
1388 		 */
1389 		if (it->no_chunk_output == 0)
1390 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1391 
1392 		SCTP_TCB_UNLOCK(it->stcb);
1393 next_assoc:
1394 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1395 		if (it->stcb == NULL) {
1396 			/* Run last function */
1397 			if (it->function_inp_end != NULL) {
1398 				inp_skip = (*it->function_inp_end) (it->inp,
1399 				    it->pointer,
1400 				    it->val);
1401 			}
1402 		}
1403 	}
1404 	SCTP_INP_RUNLOCK(it->inp);
1405 no_stcb:
1406 	/* done with all assocs on this endpoint, move on to next endpoint */
1407 	it->done_current_ep = 0;
1408 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1409 		it->inp = NULL;
1410 	} else {
1411 		it->inp = LIST_NEXT(it->inp, sctp_list);
1412 	}
1413 	if (it->inp == NULL) {
1414 		goto done_with_iterator;
1415 	}
1416 	goto select_a_new_ep;
1417 }
1418 
1419 void
1420 sctp_iterator_worker(void)
1421 {
1422 	struct sctp_iterator *it, *nit;
1423 
1424 	/* This function is called with the WQ lock in place */
1425 
1426 	sctp_it_ctl.iterator_running = 1;
1427 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1428 		/* now lets work on this one */
1429 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1430 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1431 		CURVNET_SET(it->vn);
1432 		sctp_iterator_work(it);
1433 		CURVNET_RESTORE();
1434 		SCTP_IPI_ITERATOR_WQ_LOCK();
1435 		/* sa_ignore FREED_MEMORY */
1436 	}
1437 	sctp_it_ctl.iterator_running = 0;
1438 	return;
1439 }
1440 
1441 
1442 static void
1443 sctp_handle_addr_wq(void)
1444 {
1445 	/* deal with the ADDR wq from the rtsock calls */
1446 	struct sctp_laddr *wi, *nwi;
1447 	struct sctp_asconf_iterator *asc;
1448 
1449 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1450 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1451 	if (asc == NULL) {
1452 		/* Try later, no memory */
1453 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1454 		    (struct sctp_inpcb *)NULL,
1455 		    (struct sctp_tcb *)NULL,
1456 		    (struct sctp_nets *)NULL);
1457 		return;
1458 	}
1459 	LIST_INIT(&asc->list_of_work);
1460 	asc->cnt = 0;
1461 
1462 	SCTP_WQ_ADDR_LOCK();
1463 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1464 		LIST_REMOVE(wi, sctp_nxt_addr);
1465 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1466 		asc->cnt++;
1467 	}
1468 	SCTP_WQ_ADDR_UNLOCK();
1469 
1470 	if (asc->cnt == 0) {
1471 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1472 	} else {
1473 		int ret;
1474 
1475 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1476 		    sctp_asconf_iterator_stcb,
1477 		    NULL,	/* No ep end for boundall */
1478 		    SCTP_PCB_FLAGS_BOUNDALL,
1479 		    SCTP_PCB_ANY_FEATURES,
1480 		    SCTP_ASOC_ANY_STATE,
1481 		    (void *)asc, 0,
1482 		    sctp_asconf_iterator_end, NULL, 0);
1483 		if (ret) {
1484 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1485 			/*
1486 			 * Freeing if we are stopping or put back on the
1487 			 * addr_wq.
1488 			 */
1489 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1490 				sctp_asconf_iterator_end(asc, 0);
1491 			} else {
1492 				SCTP_WQ_ADDR_LOCK();
1493 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1494 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1495 				}
1496 				SCTP_WQ_ADDR_UNLOCK();
1497 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1498 			}
1499 		}
1500 	}
1501 }
1502 
1503 void
1504 sctp_timeout_handler(void *t)
1505 {
1506 	struct sctp_inpcb *inp;
1507 	struct sctp_tcb *stcb;
1508 	struct sctp_nets *net;
1509 	struct sctp_timer *tmr;
1510 	struct mbuf *op_err;
1511 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1512 	struct socket *so;
1513 #endif
1514 	int did_output;
1515 	int type;
1516 
1517 	tmr = (struct sctp_timer *)t;
1518 	inp = (struct sctp_inpcb *)tmr->ep;
1519 	stcb = (struct sctp_tcb *)tmr->tcb;
1520 	net = (struct sctp_nets *)tmr->net;
1521 	CURVNET_SET((struct vnet *)tmr->vnet);
1522 	did_output = 1;
1523 
1524 #ifdef SCTP_AUDITING_ENABLED
1525 	sctp_audit_log(0xF0, (uint8_t)tmr->type);
1526 	sctp_auditing(3, inp, stcb, net);
1527 #endif
1528 
1529 	/* sanity checks... */
1530 	if (tmr->self != (void *)tmr) {
1531 		/*
1532 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1533 		 * (void *)tmr);
1534 		 */
1535 		CURVNET_RESTORE();
1536 		return;
1537 	}
1538 	tmr->stopped_from = 0xa001;
1539 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1540 		/*
1541 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1542 		 * tmr->type);
1543 		 */
1544 		CURVNET_RESTORE();
1545 		return;
1546 	}
1547 	tmr->stopped_from = 0xa002;
1548 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1549 		CURVNET_RESTORE();
1550 		return;
1551 	}
1552 	/* if this is an iterator timeout, get the struct and clear inp */
1553 	tmr->stopped_from = 0xa003;
1554 	if (inp) {
1555 		SCTP_INP_INCR_REF(inp);
1556 		if ((inp->sctp_socket == NULL) &&
1557 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1558 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1559 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1560 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1561 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1562 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1563 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1564 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1565 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1566 		    ) {
1567 			SCTP_INP_DECR_REF(inp);
1568 			CURVNET_RESTORE();
1569 			return;
1570 		}
1571 	}
1572 	tmr->stopped_from = 0xa004;
1573 	if (stcb) {
1574 		atomic_add_int(&stcb->asoc.refcnt, 1);
1575 		if (stcb->asoc.state == 0) {
1576 			atomic_add_int(&stcb->asoc.refcnt, -1);
1577 			if (inp) {
1578 				SCTP_INP_DECR_REF(inp);
1579 			}
1580 			CURVNET_RESTORE();
1581 			return;
1582 		}
1583 	}
1584 	type = tmr->type;
1585 	tmr->stopped_from = 0xa005;
1586 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1587 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1588 		if (inp) {
1589 			SCTP_INP_DECR_REF(inp);
1590 		}
1591 		if (stcb) {
1592 			atomic_add_int(&stcb->asoc.refcnt, -1);
1593 		}
1594 		CURVNET_RESTORE();
1595 		return;
1596 	}
1597 	tmr->stopped_from = 0xa006;
1598 
1599 	if (stcb) {
1600 		SCTP_TCB_LOCK(stcb);
1601 		atomic_add_int(&stcb->asoc.refcnt, -1);
1602 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1603 		    ((stcb->asoc.state == 0) ||
1604 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1605 			SCTP_TCB_UNLOCK(stcb);
1606 			if (inp) {
1607 				SCTP_INP_DECR_REF(inp);
1608 			}
1609 			CURVNET_RESTORE();
1610 			return;
1611 		}
1612 	}
1613 	/* record in stopped what t-o occurred */
1614 	tmr->stopped_from = type;
1615 
1616 	/* mark as being serviced now */
1617 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1618 		/*
1619 		 * Callout has been rescheduled.
1620 		 */
1621 		goto get_out;
1622 	}
1623 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1624 		/*
1625 		 * Not active, so no action.
1626 		 */
1627 		goto get_out;
1628 	}
1629 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1630 
1631 	/* call the handler for the appropriate timer type */
1632 	switch (type) {
1633 	case SCTP_TIMER_TYPE_ZERO_COPY:
1634 		if (inp == NULL) {
1635 			break;
1636 		}
1637 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1638 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1639 		}
1640 		break;
1641 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1642 		if (inp == NULL) {
1643 			break;
1644 		}
1645 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1646 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1647 		}
1648 		break;
1649 	case SCTP_TIMER_TYPE_ADDR_WQ:
1650 		sctp_handle_addr_wq();
1651 		break;
1652 	case SCTP_TIMER_TYPE_SEND:
1653 		if ((stcb == NULL) || (inp == NULL)) {
1654 			break;
1655 		}
1656 		SCTP_STAT_INCR(sctps_timodata);
1657 		stcb->asoc.timodata++;
1658 		stcb->asoc.num_send_timers_up--;
1659 		if (stcb->asoc.num_send_timers_up < 0) {
1660 			stcb->asoc.num_send_timers_up = 0;
1661 		}
1662 		SCTP_TCB_LOCK_ASSERT(stcb);
1663 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1664 			/* no need to unlock on tcb its gone */
1665 
1666 			goto out_decr;
1667 		}
1668 		SCTP_TCB_LOCK_ASSERT(stcb);
1669 #ifdef SCTP_AUDITING_ENABLED
1670 		sctp_auditing(4, inp, stcb, net);
1671 #endif
1672 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1673 		if ((stcb->asoc.num_send_timers_up == 0) &&
1674 		    (stcb->asoc.sent_queue_cnt > 0)) {
1675 			struct sctp_tmit_chunk *chk;
1676 
1677 			/*
1678 			 * safeguard. If there on some on the sent queue
1679 			 * somewhere but no timers running something is
1680 			 * wrong... so we start a timer on the first chunk
1681 			 * on the send queue on whatever net it is sent to.
1682 			 */
1683 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1684 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1685 			    chk->whoTo);
1686 		}
1687 		break;
1688 	case SCTP_TIMER_TYPE_INIT:
1689 		if ((stcb == NULL) || (inp == NULL)) {
1690 			break;
1691 		}
1692 		SCTP_STAT_INCR(sctps_timoinit);
1693 		stcb->asoc.timoinit++;
1694 		if (sctp_t1init_timer(inp, stcb, net)) {
1695 			/* no need to unlock on tcb its gone */
1696 			goto out_decr;
1697 		}
1698 		/* We do output but not here */
1699 		did_output = 0;
1700 		break;
1701 	case SCTP_TIMER_TYPE_RECV:
1702 		if ((stcb == NULL) || (inp == NULL)) {
1703 			break;
1704 		}
1705 		SCTP_STAT_INCR(sctps_timosack);
1706 		stcb->asoc.timosack++;
1707 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1708 #ifdef SCTP_AUDITING_ENABLED
1709 		sctp_auditing(4, inp, stcb, net);
1710 #endif
1711 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1712 		break;
1713 	case SCTP_TIMER_TYPE_SHUTDOWN:
1714 		if ((stcb == NULL) || (inp == NULL)) {
1715 			break;
1716 		}
1717 		if (sctp_shutdown_timer(inp, stcb, net)) {
1718 			/* no need to unlock on tcb its gone */
1719 			goto out_decr;
1720 		}
1721 		SCTP_STAT_INCR(sctps_timoshutdown);
1722 		stcb->asoc.timoshutdown++;
1723 #ifdef SCTP_AUDITING_ENABLED
1724 		sctp_auditing(4, inp, stcb, net);
1725 #endif
1726 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1727 		break;
1728 	case SCTP_TIMER_TYPE_HEARTBEAT:
1729 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1730 			break;
1731 		}
1732 		SCTP_STAT_INCR(sctps_timoheartbeat);
1733 		stcb->asoc.timoheartbeat++;
1734 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1735 			/* no need to unlock on tcb its gone */
1736 			goto out_decr;
1737 		}
1738 #ifdef SCTP_AUDITING_ENABLED
1739 		sctp_auditing(4, inp, stcb, net);
1740 #endif
1741 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1742 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1743 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1744 		}
1745 		break;
1746 	case SCTP_TIMER_TYPE_COOKIE:
1747 		if ((stcb == NULL) || (inp == NULL)) {
1748 			break;
1749 		}
1750 		if (sctp_cookie_timer(inp, stcb, net)) {
1751 			/* no need to unlock on tcb its gone */
1752 			goto out_decr;
1753 		}
1754 		SCTP_STAT_INCR(sctps_timocookie);
1755 		stcb->asoc.timocookie++;
1756 #ifdef SCTP_AUDITING_ENABLED
1757 		sctp_auditing(4, inp, stcb, net);
1758 #endif
1759 		/*
1760 		 * We consider T3 and Cookie timer pretty much the same with
1761 		 * respect to where from in chunk_output.
1762 		 */
1763 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1764 		break;
1765 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1766 		{
1767 			struct timeval tv;
1768 			int i, secret;
1769 
1770 			if (inp == NULL) {
1771 				break;
1772 			}
1773 			SCTP_STAT_INCR(sctps_timosecret);
1774 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1775 			SCTP_INP_WLOCK(inp);
1776 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1777 			inp->sctp_ep.last_secret_number =
1778 			    inp->sctp_ep.current_secret_number;
1779 			inp->sctp_ep.current_secret_number++;
1780 			if (inp->sctp_ep.current_secret_number >=
1781 			    SCTP_HOW_MANY_SECRETS) {
1782 				inp->sctp_ep.current_secret_number = 0;
1783 			}
1784 			secret = (int)inp->sctp_ep.current_secret_number;
1785 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1786 				inp->sctp_ep.secret_key[secret][i] =
1787 				    sctp_select_initial_TSN(&inp->sctp_ep);
1788 			}
1789 			SCTP_INP_WUNLOCK(inp);
1790 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1791 		}
1792 		did_output = 0;
1793 		break;
1794 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1795 		if ((stcb == NULL) || (inp == NULL)) {
1796 			break;
1797 		}
1798 		SCTP_STAT_INCR(sctps_timopathmtu);
1799 		sctp_pathmtu_timer(inp, stcb, net);
1800 		did_output = 0;
1801 		break;
1802 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1803 		if ((stcb == NULL) || (inp == NULL)) {
1804 			break;
1805 		}
1806 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1807 			/* no need to unlock on tcb its gone */
1808 			goto out_decr;
1809 		}
1810 		SCTP_STAT_INCR(sctps_timoshutdownack);
1811 		stcb->asoc.timoshutdownack++;
1812 #ifdef SCTP_AUDITING_ENABLED
1813 		sctp_auditing(4, inp, stcb, net);
1814 #endif
1815 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1816 		break;
1817 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1818 		if ((stcb == NULL) || (inp == NULL)) {
1819 			break;
1820 		}
1821 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1822 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1823 		    "Shutdown guard timer expired");
1824 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1825 		/* no need to unlock on tcb its gone */
1826 		goto out_decr;
1827 
1828 	case SCTP_TIMER_TYPE_STRRESET:
1829 		if ((stcb == NULL) || (inp == NULL)) {
1830 			break;
1831 		}
1832 		if (sctp_strreset_timer(inp, stcb, net)) {
1833 			/* no need to unlock on tcb its gone */
1834 			goto out_decr;
1835 		}
1836 		SCTP_STAT_INCR(sctps_timostrmrst);
1837 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1838 		break;
1839 	case SCTP_TIMER_TYPE_ASCONF:
1840 		if ((stcb == NULL) || (inp == NULL)) {
1841 			break;
1842 		}
1843 		if (sctp_asconf_timer(inp, stcb, net)) {
1844 			/* no need to unlock on tcb its gone */
1845 			goto out_decr;
1846 		}
1847 		SCTP_STAT_INCR(sctps_timoasconf);
1848 #ifdef SCTP_AUDITING_ENABLED
1849 		sctp_auditing(4, inp, stcb, net);
1850 #endif
1851 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1852 		break;
1853 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1854 		if ((stcb == NULL) || (inp == NULL)) {
1855 			break;
1856 		}
1857 		sctp_delete_prim_timer(inp, stcb, net);
1858 		SCTP_STAT_INCR(sctps_timodelprim);
1859 		break;
1860 
1861 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1862 		if ((stcb == NULL) || (inp == NULL)) {
1863 			break;
1864 		}
1865 		SCTP_STAT_INCR(sctps_timoautoclose);
1866 		sctp_autoclose_timer(inp, stcb, net);
1867 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1868 		did_output = 0;
1869 		break;
1870 	case SCTP_TIMER_TYPE_ASOCKILL:
1871 		if ((stcb == NULL) || (inp == NULL)) {
1872 			break;
1873 		}
1874 		SCTP_STAT_INCR(sctps_timoassockill);
1875 		/* Can we free it yet? */
1876 		SCTP_INP_DECR_REF(inp);
1877 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1878 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1879 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1880 		so = SCTP_INP_SO(inp);
1881 		atomic_add_int(&stcb->asoc.refcnt, 1);
1882 		SCTP_TCB_UNLOCK(stcb);
1883 		SCTP_SOCKET_LOCK(so, 1);
1884 		SCTP_TCB_LOCK(stcb);
1885 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1886 #endif
1887 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1888 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1889 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1890 		SCTP_SOCKET_UNLOCK(so, 1);
1891 #endif
1892 		/*
1893 		 * free asoc, always unlocks (or destroy's) so prevent
1894 		 * duplicate unlock or unlock of a free mtx :-0
1895 		 */
1896 		stcb = NULL;
1897 		goto out_no_decr;
1898 	case SCTP_TIMER_TYPE_INPKILL:
1899 		SCTP_STAT_INCR(sctps_timoinpkill);
1900 		if (inp == NULL) {
1901 			break;
1902 		}
1903 		/*
1904 		 * special case, take away our increment since WE are the
1905 		 * killer
1906 		 */
1907 		SCTP_INP_DECR_REF(inp);
1908 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1909 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1910 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1911 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1912 		inp = NULL;
1913 		goto out_no_decr;
1914 	default:
1915 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1916 		    type);
1917 		break;
1918 	}
1919 #ifdef SCTP_AUDITING_ENABLED
1920 	sctp_audit_log(0xF1, (uint8_t)type);
1921 	if (inp)
1922 		sctp_auditing(5, inp, stcb, net);
1923 #endif
1924 	if ((did_output) && stcb) {
1925 		/*
1926 		 * Now we need to clean up the control chunk chain if an
1927 		 * ECNE is on it. It must be marked as UNSENT again so next
1928 		 * call will continue to send it until such time that we get
1929 		 * a CWR, to remove it. It is, however, less likely that we
1930 		 * will find a ecn echo on the chain though.
1931 		 */
1932 		sctp_fix_ecn_echo(&stcb->asoc);
1933 	}
1934 get_out:
1935 	if (stcb) {
1936 		SCTP_TCB_UNLOCK(stcb);
1937 	}
1938 out_decr:
1939 	if (inp) {
1940 		SCTP_INP_DECR_REF(inp);
1941 	}
1942 out_no_decr:
1943 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
1944 	CURVNET_RESTORE();
1945 }
1946 
1947 void
1948 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1949     struct sctp_nets *net)
1950 {
1951 	uint32_t to_ticks;
1952 	struct sctp_timer *tmr;
1953 
1954 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1955 		return;
1956 
1957 	tmr = NULL;
1958 	if (stcb) {
1959 		SCTP_TCB_LOCK_ASSERT(stcb);
1960 	}
1961 	switch (t_type) {
1962 	case SCTP_TIMER_TYPE_ZERO_COPY:
1963 		tmr = &inp->sctp_ep.zero_copy_timer;
1964 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1965 		break;
1966 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1967 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1968 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1969 		break;
1970 	case SCTP_TIMER_TYPE_ADDR_WQ:
1971 		/* Only 1 tick away :-) */
1972 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1973 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1974 		break;
1975 	case SCTP_TIMER_TYPE_SEND:
1976 		/* Here we use the RTO timer */
1977 		{
1978 			int rto_val;
1979 
1980 			if ((stcb == NULL) || (net == NULL)) {
1981 				return;
1982 			}
1983 			tmr = &net->rxt_timer;
1984 			if (net->RTO == 0) {
1985 				rto_val = stcb->asoc.initial_rto;
1986 			} else {
1987 				rto_val = net->RTO;
1988 			}
1989 			to_ticks = MSEC_TO_TICKS(rto_val);
1990 		}
1991 		break;
1992 	case SCTP_TIMER_TYPE_INIT:
1993 		/*
1994 		 * Here we use the INIT timer default usually about 1
1995 		 * minute.
1996 		 */
1997 		if ((stcb == NULL) || (net == NULL)) {
1998 			return;
1999 		}
2000 		tmr = &net->rxt_timer;
2001 		if (net->RTO == 0) {
2002 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2003 		} else {
2004 			to_ticks = MSEC_TO_TICKS(net->RTO);
2005 		}
2006 		break;
2007 	case SCTP_TIMER_TYPE_RECV:
2008 		/*
2009 		 * Here we use the Delayed-Ack timer value from the inp
2010 		 * ususually about 200ms.
2011 		 */
2012 		if (stcb == NULL) {
2013 			return;
2014 		}
2015 		tmr = &stcb->asoc.dack_timer;
2016 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2017 		break;
2018 	case SCTP_TIMER_TYPE_SHUTDOWN:
2019 		/* Here we use the RTO of the destination. */
2020 		if ((stcb == NULL) || (net == NULL)) {
2021 			return;
2022 		}
2023 		if (net->RTO == 0) {
2024 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2025 		} else {
2026 			to_ticks = MSEC_TO_TICKS(net->RTO);
2027 		}
2028 		tmr = &net->rxt_timer;
2029 		break;
2030 	case SCTP_TIMER_TYPE_HEARTBEAT:
2031 		/*
2032 		 * the net is used here so that we can add in the RTO. Even
2033 		 * though we use a different timer. We also add the HB timer
2034 		 * PLUS a random jitter.
2035 		 */
2036 		if ((stcb == NULL) || (net == NULL)) {
2037 			return;
2038 		} else {
2039 			uint32_t rndval;
2040 			uint32_t jitter;
2041 
2042 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2043 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2044 				return;
2045 			}
2046 			if (net->RTO == 0) {
2047 				to_ticks = stcb->asoc.initial_rto;
2048 			} else {
2049 				to_ticks = net->RTO;
2050 			}
2051 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2052 			jitter = rndval % to_ticks;
2053 			if (jitter >= (to_ticks >> 1)) {
2054 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2055 			} else {
2056 				to_ticks = to_ticks - jitter;
2057 			}
2058 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2059 			    !(net->dest_state & SCTP_ADDR_PF)) {
2060 				to_ticks += net->heart_beat_delay;
2061 			}
2062 			/*
2063 			 * Now we must convert the to_ticks that are now in
2064 			 * ms to ticks.
2065 			 */
2066 			to_ticks = MSEC_TO_TICKS(to_ticks);
2067 			tmr = &net->hb_timer;
2068 		}
2069 		break;
2070 	case SCTP_TIMER_TYPE_COOKIE:
2071 		/*
2072 		 * Here we can use the RTO timer from the network since one
2073 		 * RTT was compelete. If a retran happened then we will be
2074 		 * using the RTO initial value.
2075 		 */
2076 		if ((stcb == NULL) || (net == NULL)) {
2077 			return;
2078 		}
2079 		if (net->RTO == 0) {
2080 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2081 		} else {
2082 			to_ticks = MSEC_TO_TICKS(net->RTO);
2083 		}
2084 		tmr = &net->rxt_timer;
2085 		break;
2086 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2087 		/*
2088 		 * nothing needed but the endpoint here ususually about 60
2089 		 * minutes.
2090 		 */
2091 		tmr = &inp->sctp_ep.signature_change;
2092 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2093 		break;
2094 	case SCTP_TIMER_TYPE_ASOCKILL:
2095 		if (stcb == NULL) {
2096 			return;
2097 		}
2098 		tmr = &stcb->asoc.strreset_timer;
2099 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2100 		break;
2101 	case SCTP_TIMER_TYPE_INPKILL:
2102 		/*
2103 		 * The inp is setup to die. We re-use the signature_chage
2104 		 * timer since that has stopped and we are in the GONE
2105 		 * state.
2106 		 */
2107 		tmr = &inp->sctp_ep.signature_change;
2108 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2109 		break;
2110 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2111 		/*
2112 		 * Here we use the value found in the EP for PMTU ususually
2113 		 * about 10 minutes.
2114 		 */
2115 		if ((stcb == NULL) || (net == NULL)) {
2116 			return;
2117 		}
2118 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2119 			return;
2120 		}
2121 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2122 		tmr = &net->pmtu_timer;
2123 		break;
2124 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2125 		/* Here we use the RTO of the destination */
2126 		if ((stcb == NULL) || (net == NULL)) {
2127 			return;
2128 		}
2129 		if (net->RTO == 0) {
2130 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2131 		} else {
2132 			to_ticks = MSEC_TO_TICKS(net->RTO);
2133 		}
2134 		tmr = &net->rxt_timer;
2135 		break;
2136 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2137 		/*
2138 		 * Here we use the endpoints shutdown guard timer usually
2139 		 * about 3 minutes.
2140 		 */
2141 		if (stcb == NULL) {
2142 			return;
2143 		}
2144 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2145 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2146 		} else {
2147 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2148 		}
2149 		tmr = &stcb->asoc.shut_guard_timer;
2150 		break;
2151 	case SCTP_TIMER_TYPE_STRRESET:
2152 		/*
2153 		 * Here the timer comes from the stcb but its value is from
2154 		 * the net's RTO.
2155 		 */
2156 		if ((stcb == NULL) || (net == NULL)) {
2157 			return;
2158 		}
2159 		if (net->RTO == 0) {
2160 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2161 		} else {
2162 			to_ticks = MSEC_TO_TICKS(net->RTO);
2163 		}
2164 		tmr = &stcb->asoc.strreset_timer;
2165 		break;
2166 	case SCTP_TIMER_TYPE_ASCONF:
2167 		/*
2168 		 * Here the timer comes from the stcb but its value is from
2169 		 * the net's RTO.
2170 		 */
2171 		if ((stcb == NULL) || (net == NULL)) {
2172 			return;
2173 		}
2174 		if (net->RTO == 0) {
2175 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2176 		} else {
2177 			to_ticks = MSEC_TO_TICKS(net->RTO);
2178 		}
2179 		tmr = &stcb->asoc.asconf_timer;
2180 		break;
2181 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2182 		if ((stcb == NULL) || (net != NULL)) {
2183 			return;
2184 		}
2185 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2186 		tmr = &stcb->asoc.delete_prim_timer;
2187 		break;
2188 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2189 		if (stcb == NULL) {
2190 			return;
2191 		}
2192 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2193 			/*
2194 			 * Really an error since stcb is NOT set to
2195 			 * autoclose
2196 			 */
2197 			return;
2198 		}
2199 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2200 		tmr = &stcb->asoc.autoclose_timer;
2201 		break;
2202 	default:
2203 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2204 		    __func__, t_type);
2205 		return;
2206 		break;
2207 	}
2208 	if ((to_ticks <= 0) || (tmr == NULL)) {
2209 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2210 		    __func__, t_type, to_ticks, (void *)tmr);
2211 		return;
2212 	}
2213 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2214 		/*
2215 		 * we do NOT allow you to have it already running. if it is
2216 		 * we leave the current one up unchanged
2217 		 */
2218 		return;
2219 	}
2220 	/* At this point we can proceed */
2221 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2222 		stcb->asoc.num_send_timers_up++;
2223 	}
2224 	tmr->stopped_from = 0;
2225 	tmr->type = t_type;
2226 	tmr->ep = (void *)inp;
2227 	tmr->tcb = (void *)stcb;
2228 	tmr->net = (void *)net;
2229 	tmr->self = (void *)tmr;
2230 	tmr->vnet = (void *)curvnet;
2231 	tmr->ticks = sctp_get_tick_count();
2232 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2233 	return;
2234 }
2235 
2236 void
2237 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2238     struct sctp_nets *net, uint32_t from)
2239 {
2240 	struct sctp_timer *tmr;
2241 
2242 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2243 	    (inp == NULL))
2244 		return;
2245 
2246 	tmr = NULL;
2247 	if (stcb) {
2248 		SCTP_TCB_LOCK_ASSERT(stcb);
2249 	}
2250 	switch (t_type) {
2251 	case SCTP_TIMER_TYPE_ZERO_COPY:
2252 		tmr = &inp->sctp_ep.zero_copy_timer;
2253 		break;
2254 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2255 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2256 		break;
2257 	case SCTP_TIMER_TYPE_ADDR_WQ:
2258 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2259 		break;
2260 	case SCTP_TIMER_TYPE_SEND:
2261 		if ((stcb == NULL) || (net == NULL)) {
2262 			return;
2263 		}
2264 		tmr = &net->rxt_timer;
2265 		break;
2266 	case SCTP_TIMER_TYPE_INIT:
2267 		if ((stcb == NULL) || (net == NULL)) {
2268 			return;
2269 		}
2270 		tmr = &net->rxt_timer;
2271 		break;
2272 	case SCTP_TIMER_TYPE_RECV:
2273 		if (stcb == NULL) {
2274 			return;
2275 		}
2276 		tmr = &stcb->asoc.dack_timer;
2277 		break;
2278 	case SCTP_TIMER_TYPE_SHUTDOWN:
2279 		if ((stcb == NULL) || (net == NULL)) {
2280 			return;
2281 		}
2282 		tmr = &net->rxt_timer;
2283 		break;
2284 	case SCTP_TIMER_TYPE_HEARTBEAT:
2285 		if ((stcb == NULL) || (net == NULL)) {
2286 			return;
2287 		}
2288 		tmr = &net->hb_timer;
2289 		break;
2290 	case SCTP_TIMER_TYPE_COOKIE:
2291 		if ((stcb == NULL) || (net == NULL)) {
2292 			return;
2293 		}
2294 		tmr = &net->rxt_timer;
2295 		break;
2296 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2297 		/* nothing needed but the endpoint here */
2298 		tmr = &inp->sctp_ep.signature_change;
2299 		/*
2300 		 * We re-use the newcookie timer for the INP kill timer. We
2301 		 * must assure that we do not kill it by accident.
2302 		 */
2303 		break;
2304 	case SCTP_TIMER_TYPE_ASOCKILL:
2305 		/*
2306 		 * Stop the asoc kill timer.
2307 		 */
2308 		if (stcb == NULL) {
2309 			return;
2310 		}
2311 		tmr = &stcb->asoc.strreset_timer;
2312 		break;
2313 
2314 	case SCTP_TIMER_TYPE_INPKILL:
2315 		/*
2316 		 * The inp is setup to die. We re-use the signature_chage
2317 		 * timer since that has stopped and we are in the GONE
2318 		 * state.
2319 		 */
2320 		tmr = &inp->sctp_ep.signature_change;
2321 		break;
2322 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2323 		if ((stcb == NULL) || (net == NULL)) {
2324 			return;
2325 		}
2326 		tmr = &net->pmtu_timer;
2327 		break;
2328 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2329 		if ((stcb == NULL) || (net == NULL)) {
2330 			return;
2331 		}
2332 		tmr = &net->rxt_timer;
2333 		break;
2334 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2335 		if (stcb == NULL) {
2336 			return;
2337 		}
2338 		tmr = &stcb->asoc.shut_guard_timer;
2339 		break;
2340 	case SCTP_TIMER_TYPE_STRRESET:
2341 		if (stcb == NULL) {
2342 			return;
2343 		}
2344 		tmr = &stcb->asoc.strreset_timer;
2345 		break;
2346 	case SCTP_TIMER_TYPE_ASCONF:
2347 		if (stcb == NULL) {
2348 			return;
2349 		}
2350 		tmr = &stcb->asoc.asconf_timer;
2351 		break;
2352 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2353 		if (stcb == NULL) {
2354 			return;
2355 		}
2356 		tmr = &stcb->asoc.delete_prim_timer;
2357 		break;
2358 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2359 		if (stcb == NULL) {
2360 			return;
2361 		}
2362 		tmr = &stcb->asoc.autoclose_timer;
2363 		break;
2364 	default:
2365 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2366 		    __func__, t_type);
2367 		break;
2368 	}
2369 	if (tmr == NULL) {
2370 		return;
2371 	}
2372 	if ((tmr->type != t_type) && tmr->type) {
2373 		/*
2374 		 * Ok we have a timer that is under joint use. Cookie timer
2375 		 * per chance with the SEND timer. We therefore are NOT
2376 		 * running the timer that the caller wants stopped.  So just
2377 		 * return.
2378 		 */
2379 		return;
2380 	}
2381 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2382 		stcb->asoc.num_send_timers_up--;
2383 		if (stcb->asoc.num_send_timers_up < 0) {
2384 			stcb->asoc.num_send_timers_up = 0;
2385 		}
2386 	}
2387 	tmr->self = NULL;
2388 	tmr->stopped_from = from;
2389 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2390 	return;
2391 }
2392 
2393 uint32_t
2394 sctp_calculate_len(struct mbuf *m)
2395 {
2396 	uint32_t tlen = 0;
2397 	struct mbuf *at;
2398 
2399 	at = m;
2400 	while (at) {
2401 		tlen += SCTP_BUF_LEN(at);
2402 		at = SCTP_BUF_NEXT(at);
2403 	}
2404 	return (tlen);
2405 }
2406 
2407 void
2408 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2409     struct sctp_association *asoc, uint32_t mtu)
2410 {
2411 	/*
2412 	 * Reset the P-MTU size on this association, this involves changing
2413 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2414 	 * allow the DF flag to be cleared.
2415 	 */
2416 	struct sctp_tmit_chunk *chk;
2417 	unsigned int eff_mtu, ovh;
2418 
2419 	asoc->smallest_mtu = mtu;
2420 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2421 		ovh = SCTP_MIN_OVERHEAD;
2422 	} else {
2423 		ovh = SCTP_MIN_V4_OVERHEAD;
2424 	}
2425 	eff_mtu = mtu - ovh;
2426 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2427 		if (chk->send_size > eff_mtu) {
2428 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2429 		}
2430 	}
2431 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2432 		if (chk->send_size > eff_mtu) {
2433 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2434 		}
2435 	}
2436 }
2437 
2438 
2439 /*
2440  * given an association and starting time of the current RTT period return
2441  * RTO in number of msecs net should point to the current network
2442  */
2443 
2444 uint32_t
2445 sctp_calculate_rto(struct sctp_tcb *stcb,
2446     struct sctp_association *asoc,
2447     struct sctp_nets *net,
2448     struct timeval *told,
2449     int safe, int rtt_from_sack)
2450 {
2451 	/*-
2452 	 * given an association and the starting time of the current RTT
2453 	 * period (in value1/value2) return RTO in number of msecs.
2454 	 */
2455 	int32_t rtt;		/* RTT in ms */
2456 	uint32_t new_rto;
2457 	int first_measure = 0;
2458 	struct timeval now, then, *old;
2459 
2460 	/* Copy it out for sparc64 */
2461 	if (safe == sctp_align_unsafe_makecopy) {
2462 		old = &then;
2463 		memcpy(&then, told, sizeof(struct timeval));
2464 	} else if (safe == sctp_align_safe_nocopy) {
2465 		old = told;
2466 	} else {
2467 		/* error */
2468 		SCTP_PRINTF("Huh, bad rto calc call\n");
2469 		return (0);
2470 	}
2471 	/************************/
2472 	/* 1. calculate new RTT */
2473 	/************************/
2474 	/* get the current time */
2475 	if (stcb->asoc.use_precise_time) {
2476 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2477 	} else {
2478 		(void)SCTP_GETTIME_TIMEVAL(&now);
2479 	}
2480 	timevalsub(&now, old);
2481 	/* store the current RTT in us */
2482 	net->rtt = (uint64_t)1000000 *(uint64_t)now.tv_sec +
2483 	        (uint64_t)now.tv_usec;
2484 
2485 	/* compute rtt in ms */
2486 	rtt = (int32_t)(net->rtt / 1000);
2487 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2488 		/*
2489 		 * Tell the CC module that a new update has just occurred
2490 		 * from a sack
2491 		 */
2492 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2493 	}
2494 	/*
2495 	 * Do we need to determine the lan? We do this only on sacks i.e.
2496 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2497 	 */
2498 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2499 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2500 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2501 			net->lan_type = SCTP_LAN_INTERNET;
2502 		} else {
2503 			net->lan_type = SCTP_LAN_LOCAL;
2504 		}
2505 	}
2506 	/***************************/
2507 	/* 2. update RTTVAR & SRTT */
2508 	/***************************/
2509 	/*-
2510 	 * Compute the scaled average lastsa and the
2511 	 * scaled variance lastsv as described in van Jacobson
2512 	 * Paper "Congestion Avoidance and Control", Annex A.
2513 	 *
2514 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2515 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2516 	 */
2517 	if (net->RTO_measured) {
2518 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2519 		net->lastsa += rtt;
2520 		if (rtt < 0) {
2521 			rtt = -rtt;
2522 		}
2523 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2524 		net->lastsv += rtt;
2525 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2526 			rto_logging(net, SCTP_LOG_RTTVAR);
2527 		}
2528 	} else {
2529 		/* First RTO measurment */
2530 		net->RTO_measured = 1;
2531 		first_measure = 1;
2532 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2533 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2534 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2535 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2536 		}
2537 	}
2538 	if (net->lastsv == 0) {
2539 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2540 	}
2541 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2542 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2543 	    (stcb->asoc.sat_network_lockout == 0)) {
2544 		stcb->asoc.sat_network = 1;
2545 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2546 		stcb->asoc.sat_network = 0;
2547 		stcb->asoc.sat_network_lockout = 1;
2548 	}
2549 	/* bound it, per C6/C7 in Section 5.3.1 */
2550 	if (new_rto < stcb->asoc.minrto) {
2551 		new_rto = stcb->asoc.minrto;
2552 	}
2553 	if (new_rto > stcb->asoc.maxrto) {
2554 		new_rto = stcb->asoc.maxrto;
2555 	}
2556 	/* we are now returning the RTO */
2557 	return (new_rto);
2558 }
2559 
2560 /*
2561  * return a pointer to a contiguous piece of data from the given mbuf chain
2562  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2563  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2564  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2565  */
2566 caddr_t
2567 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2568 {
2569 	uint32_t count;
2570 	uint8_t *ptr;
2571 
2572 	ptr = in_ptr;
2573 	if ((off < 0) || (len <= 0))
2574 		return (NULL);
2575 
2576 	/* find the desired start location */
2577 	while ((m != NULL) && (off > 0)) {
2578 		if (off < SCTP_BUF_LEN(m))
2579 			break;
2580 		off -= SCTP_BUF_LEN(m);
2581 		m = SCTP_BUF_NEXT(m);
2582 	}
2583 	if (m == NULL)
2584 		return (NULL);
2585 
2586 	/* is the current mbuf large enough (eg. contiguous)? */
2587 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2588 		return (mtod(m, caddr_t)+off);
2589 	} else {
2590 		/* else, it spans more than one mbuf, so save a temp copy... */
2591 		while ((m != NULL) && (len > 0)) {
2592 			count = min(SCTP_BUF_LEN(m) - off, len);
2593 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2594 			len -= count;
2595 			ptr += count;
2596 			off = 0;
2597 			m = SCTP_BUF_NEXT(m);
2598 		}
2599 		if ((m == NULL) && (len > 0))
2600 			return (NULL);
2601 		else
2602 			return ((caddr_t)in_ptr);
2603 	}
2604 }
2605 
2606 
2607 
2608 struct sctp_paramhdr *
2609 sctp_get_next_param(struct mbuf *m,
2610     int offset,
2611     struct sctp_paramhdr *pull,
2612     int pull_limit)
2613 {
2614 	/* This just provides a typed signature to Peter's Pull routine */
2615 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2616 	    (uint8_t *)pull));
2617 }
2618 
2619 
2620 struct mbuf *
2621 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2622 {
2623 	struct mbuf *m_last;
2624 	caddr_t dp;
2625 
2626 	if (padlen > 3) {
2627 		return (NULL);
2628 	}
2629 	if (padlen <= M_TRAILINGSPACE(m)) {
2630 		/*
2631 		 * The easy way. We hope the majority of the time we hit
2632 		 * here :)
2633 		 */
2634 		m_last = m;
2635 	} else {
2636 		/* Hard way we must grow the mbuf chain */
2637 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2638 		if (m_last == NULL) {
2639 			return (NULL);
2640 		}
2641 		SCTP_BUF_LEN(m_last) = 0;
2642 		SCTP_BUF_NEXT(m_last) = NULL;
2643 		SCTP_BUF_NEXT(m) = m_last;
2644 	}
2645 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2646 	SCTP_BUF_LEN(m_last) += padlen;
2647 	memset(dp, 0, padlen);
2648 	return (m_last);
2649 }
2650 
2651 struct mbuf *
2652 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2653 {
2654 	/* find the last mbuf in chain and pad it */
2655 	struct mbuf *m_at;
2656 
2657 	if (last_mbuf != NULL) {
2658 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2659 	} else {
2660 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2661 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2662 				return (sctp_add_pad_tombuf(m_at, padval));
2663 			}
2664 		}
2665 	}
2666 	return (NULL);
2667 }
2668 
2669 static void
2670 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2671     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2672 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2673     SCTP_UNUSED
2674 #endif
2675 )
2676 {
2677 	struct mbuf *m_notify;
2678 	struct sctp_assoc_change *sac;
2679 	struct sctp_queued_to_read *control;
2680 	unsigned int notif_len;
2681 	uint16_t abort_len;
2682 	unsigned int i;
2683 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2684 	struct socket *so;
2685 #endif
2686 
2687 	if (stcb == NULL) {
2688 		return;
2689 	}
2690 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2691 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2692 		if (abort != NULL) {
2693 			abort_len = ntohs(abort->ch.chunk_length);
2694 		} else {
2695 			abort_len = 0;
2696 		}
2697 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2698 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2699 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2700 			notif_len += abort_len;
2701 		}
2702 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2703 		if (m_notify == NULL) {
2704 			/* Retry with smaller value. */
2705 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2706 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2707 			if (m_notify == NULL) {
2708 				goto set_error;
2709 			}
2710 		}
2711 		SCTP_BUF_NEXT(m_notify) = NULL;
2712 		sac = mtod(m_notify, struct sctp_assoc_change *);
2713 		memset(sac, 0, notif_len);
2714 		sac->sac_type = SCTP_ASSOC_CHANGE;
2715 		sac->sac_flags = 0;
2716 		sac->sac_length = sizeof(struct sctp_assoc_change);
2717 		sac->sac_state = state;
2718 		sac->sac_error = error;
2719 		/* XXX verify these stream counts */
2720 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2721 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2722 		sac->sac_assoc_id = sctp_get_associd(stcb);
2723 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2724 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2725 				i = 0;
2726 				if (stcb->asoc.prsctp_supported == 1) {
2727 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2728 				}
2729 				if (stcb->asoc.auth_supported == 1) {
2730 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2731 				}
2732 				if (stcb->asoc.asconf_supported == 1) {
2733 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2734 				}
2735 				if (stcb->asoc.idata_supported == 1) {
2736 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2737 				}
2738 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2739 				if (stcb->asoc.reconfig_supported == 1) {
2740 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2741 				}
2742 				sac->sac_length += i;
2743 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2744 				memcpy(sac->sac_info, abort, abort_len);
2745 				sac->sac_length += abort_len;
2746 			}
2747 		}
2748 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2749 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2750 		    0, 0, stcb->asoc.context, 0, 0, 0,
2751 		    m_notify);
2752 		if (control != NULL) {
2753 			control->length = SCTP_BUF_LEN(m_notify);
2754 			/* not that we need this */
2755 			control->tail_mbuf = m_notify;
2756 			control->spec_flags = M_NOTIFICATION;
2757 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2758 			    control,
2759 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2760 			    so_locked);
2761 		} else {
2762 			sctp_m_freem(m_notify);
2763 		}
2764 	}
2765 	/*
2766 	 * For 1-to-1 style sockets, we send up and error when an ABORT
2767 	 * comes in.
2768 	 */
2769 set_error:
2770 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2771 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2772 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2773 		SOCK_LOCK(stcb->sctp_socket);
2774 		if (from_peer) {
2775 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2776 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2777 				stcb->sctp_socket->so_error = ECONNREFUSED;
2778 			} else {
2779 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2780 				stcb->sctp_socket->so_error = ECONNRESET;
2781 			}
2782 		} else {
2783 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2784 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2785 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2786 				stcb->sctp_socket->so_error = ETIMEDOUT;
2787 			} else {
2788 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2789 				stcb->sctp_socket->so_error = ECONNABORTED;
2790 			}
2791 		}
2792 	}
2793 	/* Wake ANY sleepers */
2794 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2795 	so = SCTP_INP_SO(stcb->sctp_ep);
2796 	if (!so_locked) {
2797 		atomic_add_int(&stcb->asoc.refcnt, 1);
2798 		SCTP_TCB_UNLOCK(stcb);
2799 		SCTP_SOCKET_LOCK(so, 1);
2800 		SCTP_TCB_LOCK(stcb);
2801 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2802 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2803 			SCTP_SOCKET_UNLOCK(so, 1);
2804 			return;
2805 		}
2806 	}
2807 #endif
2808 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2809 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2810 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2811 		socantrcvmore_locked(stcb->sctp_socket);
2812 	}
2813 	sorwakeup(stcb->sctp_socket);
2814 	sowwakeup(stcb->sctp_socket);
2815 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2816 	if (!so_locked) {
2817 		SCTP_SOCKET_UNLOCK(so, 1);
2818 	}
2819 #endif
2820 }
2821 
2822 static void
2823 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2824     struct sockaddr *sa, uint32_t error, int so_locked
2825 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2826     SCTP_UNUSED
2827 #endif
2828 )
2829 {
2830 	struct mbuf *m_notify;
2831 	struct sctp_paddr_change *spc;
2832 	struct sctp_queued_to_read *control;
2833 
2834 	if ((stcb == NULL) ||
2835 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2836 		/* event not enabled */
2837 		return;
2838 	}
2839 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2840 	if (m_notify == NULL)
2841 		return;
2842 	SCTP_BUF_LEN(m_notify) = 0;
2843 	spc = mtod(m_notify, struct sctp_paddr_change *);
2844 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2845 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2846 	spc->spc_flags = 0;
2847 	spc->spc_length = sizeof(struct sctp_paddr_change);
2848 	switch (sa->sa_family) {
2849 #ifdef INET
2850 	case AF_INET:
2851 #ifdef INET6
2852 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2853 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2854 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2855 		} else {
2856 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2857 		}
2858 #else
2859 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2860 #endif
2861 		break;
2862 #endif
2863 #ifdef INET6
2864 	case AF_INET6:
2865 		{
2866 			struct sockaddr_in6 *sin6;
2867 
2868 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2869 
2870 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2871 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2872 				if (sin6->sin6_scope_id == 0) {
2873 					/* recover scope_id for user */
2874 					(void)sa6_recoverscope(sin6);
2875 				} else {
2876 					/* clear embedded scope_id for user */
2877 					in6_clearscope(&sin6->sin6_addr);
2878 				}
2879 			}
2880 			break;
2881 		}
2882 #endif
2883 	default:
2884 		/* TSNH */
2885 		break;
2886 	}
2887 	spc->spc_state = state;
2888 	spc->spc_error = error;
2889 	spc->spc_assoc_id = sctp_get_associd(stcb);
2890 
2891 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2892 	SCTP_BUF_NEXT(m_notify) = NULL;
2893 
2894 	/* append to socket */
2895 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2896 	    0, 0, stcb->asoc.context, 0, 0, 0,
2897 	    m_notify);
2898 	if (control == NULL) {
2899 		/* no memory */
2900 		sctp_m_freem(m_notify);
2901 		return;
2902 	}
2903 	control->length = SCTP_BUF_LEN(m_notify);
2904 	control->spec_flags = M_NOTIFICATION;
2905 	/* not that we need this */
2906 	control->tail_mbuf = m_notify;
2907 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2908 	    control,
2909 	    &stcb->sctp_socket->so_rcv, 1,
2910 	    SCTP_READ_LOCK_NOT_HELD,
2911 	    so_locked);
2912 }
2913 
2914 
2915 static void
2916 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2917     struct sctp_tmit_chunk *chk, int so_locked
2918 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2919     SCTP_UNUSED
2920 #endif
2921 )
2922 {
2923 	struct mbuf *m_notify;
2924 	struct sctp_send_failed *ssf;
2925 	struct sctp_send_failed_event *ssfe;
2926 	struct sctp_queued_to_read *control;
2927 	struct sctp_chunkhdr *chkhdr;
2928 	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
2929 
2930 	if ((stcb == NULL) ||
2931 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2932 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2933 		/* event not enabled */
2934 		return;
2935 	}
2936 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2937 		notifhdr_len = sizeof(struct sctp_send_failed_event);
2938 	} else {
2939 		notifhdr_len = sizeof(struct sctp_send_failed);
2940 	}
2941 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
2942 	if (m_notify == NULL)
2943 		/* no space left */
2944 		return;
2945 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
2946 	if (stcb->asoc.idata_supported) {
2947 		chkhdr_len = sizeof(struct sctp_idata_chunk);
2948 	} else {
2949 		chkhdr_len = sizeof(struct sctp_data_chunk);
2950 	}
2951 	/* Use some defaults in case we can't access the chunk header */
2952 	if (chk->send_size >= chkhdr_len) {
2953 		payload_len = chk->send_size - chkhdr_len;
2954 	} else {
2955 		payload_len = 0;
2956 	}
2957 	padding_len = 0;
2958 	if (chk->data != NULL) {
2959 		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
2960 		if (chkhdr != NULL) {
2961 			chk_len = ntohs(chkhdr->chunk_length);
2962 			if ((chk_len >= chkhdr_len) &&
2963 			    (chk->send_size >= chk_len) &&
2964 			    (chk->send_size - chk_len < 4)) {
2965 				padding_len = chk->send_size - chk_len;
2966 				payload_len = chk->send_size - chkhdr_len - padding_len;
2967 			}
2968 		}
2969 	}
2970 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2971 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2972 		memset(ssfe, 0, notifhdr_len);
2973 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2974 		if (sent) {
2975 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2976 		} else {
2977 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2978 		}
2979 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
2980 		ssfe->ssfe_error = error;
2981 		/* not exactly what the user sent in, but should be close :) */
2982 		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
2983 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2984 		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
2985 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2986 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2987 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2988 	} else {
2989 		ssf = mtod(m_notify, struct sctp_send_failed *);
2990 		memset(ssf, 0, notifhdr_len);
2991 		ssf->ssf_type = SCTP_SEND_FAILED;
2992 		if (sent) {
2993 			ssf->ssf_flags = SCTP_DATA_SENT;
2994 		} else {
2995 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2996 		}
2997 		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
2998 		ssf->ssf_error = error;
2999 		/* not exactly what the user sent in, but should be close :) */
3000 		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
3001 		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
3002 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3003 		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
3004 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
3005 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3006 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3007 	}
3008 	if (chk->data != NULL) {
3009 		/* Trim off the sctp chunk header (it should be there) */
3010 		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
3011 			m_adj(chk->data, chkhdr_len);
3012 			m_adj(chk->data, -padding_len);
3013 			sctp_mbuf_crush(chk->data);
3014 			chk->send_size -= (chkhdr_len + padding_len);
3015 		}
3016 	}
3017 	SCTP_BUF_NEXT(m_notify) = chk->data;
3018 	/* Steal off the mbuf */
3019 	chk->data = NULL;
3020 	/*
3021 	 * For this case, we check the actual socket buffer, since the assoc
3022 	 * is going away we don't want to overfill the socket buffer for a
3023 	 * non-reader
3024 	 */
3025 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3026 		sctp_m_freem(m_notify);
3027 		return;
3028 	}
3029 	/* append to socket */
3030 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3031 	    0, 0, stcb->asoc.context, 0, 0, 0,
3032 	    m_notify);
3033 	if (control == NULL) {
3034 		/* no memory */
3035 		sctp_m_freem(m_notify);
3036 		return;
3037 	}
3038 	control->spec_flags = M_NOTIFICATION;
3039 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3040 	    control,
3041 	    &stcb->sctp_socket->so_rcv, 1,
3042 	    SCTP_READ_LOCK_NOT_HELD,
3043 	    so_locked);
3044 }
3045 
3046 
3047 static void
3048 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3049     struct sctp_stream_queue_pending *sp, int so_locked
3050 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3051     SCTP_UNUSED
3052 #endif
3053 )
3054 {
3055 	struct mbuf *m_notify;
3056 	struct sctp_send_failed *ssf;
3057 	struct sctp_send_failed_event *ssfe;
3058 	struct sctp_queued_to_read *control;
3059 	int notifhdr_len;
3060 
3061 	if ((stcb == NULL) ||
3062 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3063 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3064 		/* event not enabled */
3065 		return;
3066 	}
3067 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3068 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3069 	} else {
3070 		notifhdr_len = sizeof(struct sctp_send_failed);
3071 	}
3072 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3073 	if (m_notify == NULL) {
3074 		/* no space left */
3075 		return;
3076 	}
3077 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3078 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3079 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3080 		memset(ssfe, 0, notifhdr_len);
3081 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3082 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3083 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3084 		ssfe->ssfe_error = error;
3085 		/* not exactly what the user sent in, but should be close :) */
3086 		ssfe->ssfe_info.snd_sid = sp->sid;
3087 		if (sp->some_taken) {
3088 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3089 		} else {
3090 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3091 		}
3092 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3093 		ssfe->ssfe_info.snd_context = sp->context;
3094 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3095 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3096 	} else {
3097 		ssf = mtod(m_notify, struct sctp_send_failed *);
3098 		memset(ssf, 0, notifhdr_len);
3099 		ssf->ssf_type = SCTP_SEND_FAILED;
3100 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3101 		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3102 		ssf->ssf_error = error;
3103 		/* not exactly what the user sent in, but should be close :) */
3104 		ssf->ssf_info.sinfo_stream = sp->sid;
3105 		ssf->ssf_info.sinfo_ssn = 0;
3106 		if (sp->some_taken) {
3107 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3108 		} else {
3109 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3110 		}
3111 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3112 		ssf->ssf_info.sinfo_context = sp->context;
3113 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3114 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3115 	}
3116 	SCTP_BUF_NEXT(m_notify) = sp->data;
3117 
3118 	/* Steal off the mbuf */
3119 	sp->data = NULL;
3120 	/*
3121 	 * For this case, we check the actual socket buffer, since the assoc
3122 	 * is going away we don't want to overfill the socket buffer for a
3123 	 * non-reader
3124 	 */
3125 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3126 		sctp_m_freem(m_notify);
3127 		return;
3128 	}
3129 	/* append to socket */
3130 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3131 	    0, 0, stcb->asoc.context, 0, 0, 0,
3132 	    m_notify);
3133 	if (control == NULL) {
3134 		/* no memory */
3135 		sctp_m_freem(m_notify);
3136 		return;
3137 	}
3138 	control->spec_flags = M_NOTIFICATION;
3139 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3140 	    control,
3141 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3142 }
3143 
3144 
3145 
3146 static void
3147 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3148 {
3149 	struct mbuf *m_notify;
3150 	struct sctp_adaptation_event *sai;
3151 	struct sctp_queued_to_read *control;
3152 
3153 	if ((stcb == NULL) ||
3154 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3155 		/* event not enabled */
3156 		return;
3157 	}
3158 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3159 	if (m_notify == NULL)
3160 		/* no space left */
3161 		return;
3162 	SCTP_BUF_LEN(m_notify) = 0;
3163 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3164 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3165 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3166 	sai->sai_flags = 0;
3167 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3168 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3169 	sai->sai_assoc_id = sctp_get_associd(stcb);
3170 
3171 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3172 	SCTP_BUF_NEXT(m_notify) = NULL;
3173 
3174 	/* append to socket */
3175 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3176 	    0, 0, stcb->asoc.context, 0, 0, 0,
3177 	    m_notify);
3178 	if (control == NULL) {
3179 		/* no memory */
3180 		sctp_m_freem(m_notify);
3181 		return;
3182 	}
3183 	control->length = SCTP_BUF_LEN(m_notify);
3184 	control->spec_flags = M_NOTIFICATION;
3185 	/* not that we need this */
3186 	control->tail_mbuf = m_notify;
3187 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3188 	    control,
3189 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3190 }
3191 
3192 /* This always must be called with the read-queue LOCKED in the INP */
3193 static void
3194 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3195     uint32_t val, int so_locked
3196 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3197     SCTP_UNUSED
3198 #endif
3199 )
3200 {
3201 	struct mbuf *m_notify;
3202 	struct sctp_pdapi_event *pdapi;
3203 	struct sctp_queued_to_read *control;
3204 	struct sockbuf *sb;
3205 
3206 	if ((stcb == NULL) ||
3207 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3208 		/* event not enabled */
3209 		return;
3210 	}
3211 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3212 		return;
3213 	}
3214 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3215 	if (m_notify == NULL)
3216 		/* no space left */
3217 		return;
3218 	SCTP_BUF_LEN(m_notify) = 0;
3219 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3220 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3221 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3222 	pdapi->pdapi_flags = 0;
3223 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3224 	pdapi->pdapi_indication = error;
3225 	pdapi->pdapi_stream = (val >> 16);
3226 	pdapi->pdapi_seq = (val & 0x0000ffff);
3227 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3228 
3229 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3230 	SCTP_BUF_NEXT(m_notify) = NULL;
3231 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3232 	    0, 0, stcb->asoc.context, 0, 0, 0,
3233 	    m_notify);
3234 	if (control == NULL) {
3235 		/* no memory */
3236 		sctp_m_freem(m_notify);
3237 		return;
3238 	}
3239 	control->spec_flags = M_NOTIFICATION;
3240 	control->length = SCTP_BUF_LEN(m_notify);
3241 	/* not that we need this */
3242 	control->tail_mbuf = m_notify;
3243 	control->held_length = 0;
3244 	control->length = 0;
3245 	sb = &stcb->sctp_socket->so_rcv;
3246 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3247 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3248 	}
3249 	sctp_sballoc(stcb, sb, m_notify);
3250 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3251 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3252 	}
3253 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3254 	control->end_added = 1;
3255 	if (stcb->asoc.control_pdapi)
3256 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3257 	else {
3258 		/* we really should not see this case */
3259 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3260 	}
3261 	if (stcb->sctp_ep && stcb->sctp_socket) {
3262 		/* This should always be the case */
3263 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3264 		struct socket *so;
3265 
3266 		so = SCTP_INP_SO(stcb->sctp_ep);
3267 		if (!so_locked) {
3268 			atomic_add_int(&stcb->asoc.refcnt, 1);
3269 			SCTP_TCB_UNLOCK(stcb);
3270 			SCTP_SOCKET_LOCK(so, 1);
3271 			SCTP_TCB_LOCK(stcb);
3272 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3273 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3274 				SCTP_SOCKET_UNLOCK(so, 1);
3275 				return;
3276 			}
3277 		}
3278 #endif
3279 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3280 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3281 		if (!so_locked) {
3282 			SCTP_SOCKET_UNLOCK(so, 1);
3283 		}
3284 #endif
3285 	}
3286 }
3287 
3288 static void
3289 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3290 {
3291 	struct mbuf *m_notify;
3292 	struct sctp_shutdown_event *sse;
3293 	struct sctp_queued_to_read *control;
3294 
3295 	/*
3296 	 * For TCP model AND UDP connected sockets we will send an error up
3297 	 * when an SHUTDOWN completes
3298 	 */
3299 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3300 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3301 		/* mark socket closed for read/write and wakeup! */
3302 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3303 		struct socket *so;
3304 
3305 		so = SCTP_INP_SO(stcb->sctp_ep);
3306 		atomic_add_int(&stcb->asoc.refcnt, 1);
3307 		SCTP_TCB_UNLOCK(stcb);
3308 		SCTP_SOCKET_LOCK(so, 1);
3309 		SCTP_TCB_LOCK(stcb);
3310 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3311 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3312 			SCTP_SOCKET_UNLOCK(so, 1);
3313 			return;
3314 		}
3315 #endif
3316 		socantsendmore(stcb->sctp_socket);
3317 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3318 		SCTP_SOCKET_UNLOCK(so, 1);
3319 #endif
3320 	}
3321 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3322 		/* event not enabled */
3323 		return;
3324 	}
3325 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3326 	if (m_notify == NULL)
3327 		/* no space left */
3328 		return;
3329 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3330 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3331 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3332 	sse->sse_flags = 0;
3333 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3334 	sse->sse_assoc_id = sctp_get_associd(stcb);
3335 
3336 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3337 	SCTP_BUF_NEXT(m_notify) = NULL;
3338 
3339 	/* append to socket */
3340 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3341 	    0, 0, stcb->asoc.context, 0, 0, 0,
3342 	    m_notify);
3343 	if (control == NULL) {
3344 		/* no memory */
3345 		sctp_m_freem(m_notify);
3346 		return;
3347 	}
3348 	control->spec_flags = M_NOTIFICATION;
3349 	control->length = SCTP_BUF_LEN(m_notify);
3350 	/* not that we need this */
3351 	control->tail_mbuf = m_notify;
3352 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3353 	    control,
3354 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3355 }
3356 
3357 static void
3358 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3359     int so_locked
3360 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3361     SCTP_UNUSED
3362 #endif
3363 )
3364 {
3365 	struct mbuf *m_notify;
3366 	struct sctp_sender_dry_event *event;
3367 	struct sctp_queued_to_read *control;
3368 
3369 	if ((stcb == NULL) ||
3370 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3371 		/* event not enabled */
3372 		return;
3373 	}
3374 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3375 	if (m_notify == NULL) {
3376 		/* no space left */
3377 		return;
3378 	}
3379 	SCTP_BUF_LEN(m_notify) = 0;
3380 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3381 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3382 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3383 	event->sender_dry_flags = 0;
3384 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3385 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3386 
3387 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3388 	SCTP_BUF_NEXT(m_notify) = NULL;
3389 
3390 	/* append to socket */
3391 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3392 	    0, 0, stcb->asoc.context, 0, 0, 0,
3393 	    m_notify);
3394 	if (control == NULL) {
3395 		/* no memory */
3396 		sctp_m_freem(m_notify);
3397 		return;
3398 	}
3399 	control->length = SCTP_BUF_LEN(m_notify);
3400 	control->spec_flags = M_NOTIFICATION;
3401 	/* not that we need this */
3402 	control->tail_mbuf = m_notify;
3403 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3404 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3405 }
3406 
3407 
3408 void
3409 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3410 {
3411 	struct mbuf *m_notify;
3412 	struct sctp_queued_to_read *control;
3413 	struct sctp_stream_change_event *stradd;
3414 
3415 	if ((stcb == NULL) ||
3416 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3417 		/* event not enabled */
3418 		return;
3419 	}
3420 	if ((stcb->asoc.peer_req_out) && flag) {
3421 		/* Peer made the request, don't tell the local user */
3422 		stcb->asoc.peer_req_out = 0;
3423 		return;
3424 	}
3425 	stcb->asoc.peer_req_out = 0;
3426 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3427 	if (m_notify == NULL)
3428 		/* no space left */
3429 		return;
3430 	SCTP_BUF_LEN(m_notify) = 0;
3431 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3432 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3433 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3434 	stradd->strchange_flags = flag;
3435 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3436 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3437 	stradd->strchange_instrms = numberin;
3438 	stradd->strchange_outstrms = numberout;
3439 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3440 	SCTP_BUF_NEXT(m_notify) = NULL;
3441 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3442 		/* no space */
3443 		sctp_m_freem(m_notify);
3444 		return;
3445 	}
3446 	/* append to socket */
3447 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3448 	    0, 0, stcb->asoc.context, 0, 0, 0,
3449 	    m_notify);
3450 	if (control == NULL) {
3451 		/* no memory */
3452 		sctp_m_freem(m_notify);
3453 		return;
3454 	}
3455 	control->spec_flags = M_NOTIFICATION;
3456 	control->length = SCTP_BUF_LEN(m_notify);
3457 	/* not that we need this */
3458 	control->tail_mbuf = m_notify;
3459 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3460 	    control,
3461 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3462 }
3463 
3464 void
3465 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3466 {
3467 	struct mbuf *m_notify;
3468 	struct sctp_queued_to_read *control;
3469 	struct sctp_assoc_reset_event *strasoc;
3470 
3471 	if ((stcb == NULL) ||
3472 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3473 		/* event not enabled */
3474 		return;
3475 	}
3476 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3477 	if (m_notify == NULL)
3478 		/* no space left */
3479 		return;
3480 	SCTP_BUF_LEN(m_notify) = 0;
3481 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3482 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3483 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3484 	strasoc->assocreset_flags = flag;
3485 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3486 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3487 	strasoc->assocreset_local_tsn = sending_tsn;
3488 	strasoc->assocreset_remote_tsn = recv_tsn;
3489 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3490 	SCTP_BUF_NEXT(m_notify) = NULL;
3491 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3492 		/* no space */
3493 		sctp_m_freem(m_notify);
3494 		return;
3495 	}
3496 	/* append to socket */
3497 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3498 	    0, 0, stcb->asoc.context, 0, 0, 0,
3499 	    m_notify);
3500 	if (control == NULL) {
3501 		/* no memory */
3502 		sctp_m_freem(m_notify);
3503 		return;
3504 	}
3505 	control->spec_flags = M_NOTIFICATION;
3506 	control->length = SCTP_BUF_LEN(m_notify);
3507 	/* not that we need this */
3508 	control->tail_mbuf = m_notify;
3509 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3510 	    control,
3511 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3512 }
3513 
3514 
3515 
3516 static void
3517 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3518     int number_entries, uint16_t *list, int flag)
3519 {
3520 	struct mbuf *m_notify;
3521 	struct sctp_queued_to_read *control;
3522 	struct sctp_stream_reset_event *strreset;
3523 	int len;
3524 
3525 	if ((stcb == NULL) ||
3526 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3527 		/* event not enabled */
3528 		return;
3529 	}
3530 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3531 	if (m_notify == NULL)
3532 		/* no space left */
3533 		return;
3534 	SCTP_BUF_LEN(m_notify) = 0;
3535 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3536 	if (len > M_TRAILINGSPACE(m_notify)) {
3537 		/* never enough room */
3538 		sctp_m_freem(m_notify);
3539 		return;
3540 	}
3541 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3542 	memset(strreset, 0, len);
3543 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3544 	strreset->strreset_flags = flag;
3545 	strreset->strreset_length = len;
3546 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3547 	if (number_entries) {
3548 		int i;
3549 
3550 		for (i = 0; i < number_entries; i++) {
3551 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3552 		}
3553 	}
3554 	SCTP_BUF_LEN(m_notify) = len;
3555 	SCTP_BUF_NEXT(m_notify) = NULL;
3556 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3557 		/* no space */
3558 		sctp_m_freem(m_notify);
3559 		return;
3560 	}
3561 	/* append to socket */
3562 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3563 	    0, 0, stcb->asoc.context, 0, 0, 0,
3564 	    m_notify);
3565 	if (control == NULL) {
3566 		/* no memory */
3567 		sctp_m_freem(m_notify);
3568 		return;
3569 	}
3570 	control->spec_flags = M_NOTIFICATION;
3571 	control->length = SCTP_BUF_LEN(m_notify);
3572 	/* not that we need this */
3573 	control->tail_mbuf = m_notify;
3574 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3575 	    control,
3576 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3577 }
3578 
3579 
3580 static void
3581 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3582 {
3583 	struct mbuf *m_notify;
3584 	struct sctp_remote_error *sre;
3585 	struct sctp_queued_to_read *control;
3586 	unsigned int notif_len;
3587 	uint16_t chunk_len;
3588 
3589 	if ((stcb == NULL) ||
3590 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3591 		return;
3592 	}
3593 	if (chunk != NULL) {
3594 		chunk_len = ntohs(chunk->ch.chunk_length);
3595 	} else {
3596 		chunk_len = 0;
3597 	}
3598 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3599 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3600 	if (m_notify == NULL) {
3601 		/* Retry with smaller value. */
3602 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3603 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3604 		if (m_notify == NULL) {
3605 			return;
3606 		}
3607 	}
3608 	SCTP_BUF_NEXT(m_notify) = NULL;
3609 	sre = mtod(m_notify, struct sctp_remote_error *);
3610 	memset(sre, 0, notif_len);
3611 	sre->sre_type = SCTP_REMOTE_ERROR;
3612 	sre->sre_flags = 0;
3613 	sre->sre_length = sizeof(struct sctp_remote_error);
3614 	sre->sre_error = error;
3615 	sre->sre_assoc_id = sctp_get_associd(stcb);
3616 	if (notif_len > sizeof(struct sctp_remote_error)) {
3617 		memcpy(sre->sre_data, chunk, chunk_len);
3618 		sre->sre_length += chunk_len;
3619 	}
3620 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3621 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3622 	    0, 0, stcb->asoc.context, 0, 0, 0,
3623 	    m_notify);
3624 	if (control != NULL) {
3625 		control->length = SCTP_BUF_LEN(m_notify);
3626 		/* not that we need this */
3627 		control->tail_mbuf = m_notify;
3628 		control->spec_flags = M_NOTIFICATION;
3629 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3630 		    control,
3631 		    &stcb->sctp_socket->so_rcv, 1,
3632 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3633 	} else {
3634 		sctp_m_freem(m_notify);
3635 	}
3636 }
3637 
3638 
3639 void
3640 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3641     uint32_t error, void *data, int so_locked
3642 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3643     SCTP_UNUSED
3644 #endif
3645 )
3646 {
3647 	if ((stcb == NULL) ||
3648 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3649 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3650 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3651 		/* If the socket is gone we are out of here */
3652 		return;
3653 	}
3654 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3655 		return;
3656 	}
3657 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3658 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3659 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3660 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3661 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3662 			/* Don't report these in front states */
3663 			return;
3664 		}
3665 	}
3666 	switch (notification) {
3667 	case SCTP_NOTIFY_ASSOC_UP:
3668 		if (stcb->asoc.assoc_up_sent == 0) {
3669 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3670 			stcb->asoc.assoc_up_sent = 1;
3671 		}
3672 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3673 			sctp_notify_adaptation_layer(stcb);
3674 		}
3675 		if (stcb->asoc.auth_supported == 0) {
3676 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3677 			    NULL, so_locked);
3678 		}
3679 		break;
3680 	case SCTP_NOTIFY_ASSOC_DOWN:
3681 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3682 		break;
3683 	case SCTP_NOTIFY_INTERFACE_DOWN:
3684 		{
3685 			struct sctp_nets *net;
3686 
3687 			net = (struct sctp_nets *)data;
3688 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3689 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3690 			break;
3691 		}
3692 	case SCTP_NOTIFY_INTERFACE_UP:
3693 		{
3694 			struct sctp_nets *net;
3695 
3696 			net = (struct sctp_nets *)data;
3697 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3698 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3699 			break;
3700 		}
3701 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3702 		{
3703 			struct sctp_nets *net;
3704 
3705 			net = (struct sctp_nets *)data;
3706 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3707 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3708 			break;
3709 		}
3710 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3711 		sctp_notify_send_failed2(stcb, error,
3712 		    (struct sctp_stream_queue_pending *)data, so_locked);
3713 		break;
3714 	case SCTP_NOTIFY_SENT_DG_FAIL:
3715 		sctp_notify_send_failed(stcb, 1, error,
3716 		    (struct sctp_tmit_chunk *)data, so_locked);
3717 		break;
3718 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3719 		sctp_notify_send_failed(stcb, 0, error,
3720 		    (struct sctp_tmit_chunk *)data, so_locked);
3721 		break;
3722 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3723 		{
3724 			uint32_t val;
3725 
3726 			val = *((uint32_t *)data);
3727 
3728 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3729 			break;
3730 		}
3731 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3732 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3733 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3734 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3735 		} else {
3736 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3737 		}
3738 		break;
3739 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3740 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3741 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3742 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3743 		} else {
3744 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3745 		}
3746 		break;
3747 	case SCTP_NOTIFY_ASSOC_RESTART:
3748 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3749 		if (stcb->asoc.auth_supported == 0) {
3750 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3751 			    NULL, so_locked);
3752 		}
3753 		break;
3754 	case SCTP_NOTIFY_STR_RESET_SEND:
3755 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
3756 		break;
3757 	case SCTP_NOTIFY_STR_RESET_RECV:
3758 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
3759 		break;
3760 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3761 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3762 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3763 		break;
3764 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3765 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3766 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3767 		break;
3768 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3769 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3770 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3771 		break;
3772 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3773 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3774 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3775 		break;
3776 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3777 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3778 		    error, so_locked);
3779 		break;
3780 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3781 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3782 		    error, so_locked);
3783 		break;
3784 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3785 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3786 		    error, so_locked);
3787 		break;
3788 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3789 		sctp_notify_shutdown_event(stcb);
3790 		break;
3791 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3792 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3793 		    (uint16_t)(uintptr_t)data,
3794 		    so_locked);
3795 		break;
3796 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3797 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3798 		    (uint16_t)(uintptr_t)data,
3799 		    so_locked);
3800 		break;
3801 	case SCTP_NOTIFY_NO_PEER_AUTH:
3802 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3803 		    (uint16_t)(uintptr_t)data,
3804 		    so_locked);
3805 		break;
3806 	case SCTP_NOTIFY_SENDER_DRY:
3807 		sctp_notify_sender_dry_event(stcb, so_locked);
3808 		break;
3809 	case SCTP_NOTIFY_REMOTE_ERROR:
3810 		sctp_notify_remote_error(stcb, error, data);
3811 		break;
3812 	default:
3813 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3814 		    __func__, notification, notification);
3815 		break;
3816 	}			/* end switch */
3817 }
3818 
3819 void
3820 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3821 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3822     SCTP_UNUSED
3823 #endif
3824 )
3825 {
3826 	struct sctp_association *asoc;
3827 	struct sctp_stream_out *outs;
3828 	struct sctp_tmit_chunk *chk, *nchk;
3829 	struct sctp_stream_queue_pending *sp, *nsp;
3830 	int i;
3831 
3832 	if (stcb == NULL) {
3833 		return;
3834 	}
3835 	asoc = &stcb->asoc;
3836 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3837 		/* already being freed */
3838 		return;
3839 	}
3840 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3841 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3842 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3843 		return;
3844 	}
3845 	/* now through all the gunk freeing chunks */
3846 	if (holds_lock == 0) {
3847 		SCTP_TCB_SEND_LOCK(stcb);
3848 	}
3849 	/* sent queue SHOULD be empty */
3850 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3851 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3852 		asoc->sent_queue_cnt--;
3853 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3854 			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3855 				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3856 #ifdef INVARIANTS
3857 			} else {
3858 				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3859 #endif
3860 			}
3861 		}
3862 		if (chk->data != NULL) {
3863 			sctp_free_bufspace(stcb, asoc, chk, 1);
3864 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3865 			    error, chk, so_locked);
3866 			if (chk->data) {
3867 				sctp_m_freem(chk->data);
3868 				chk->data = NULL;
3869 			}
3870 		}
3871 		sctp_free_a_chunk(stcb, chk, so_locked);
3872 		/* sa_ignore FREED_MEMORY */
3873 	}
3874 	/* pending send queue SHOULD be empty */
3875 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3876 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3877 		asoc->send_queue_cnt--;
3878 		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3879 			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3880 #ifdef INVARIANTS
3881 		} else {
3882 			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3883 #endif
3884 		}
3885 		if (chk->data != NULL) {
3886 			sctp_free_bufspace(stcb, asoc, chk, 1);
3887 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3888 			    error, chk, so_locked);
3889 			if (chk->data) {
3890 				sctp_m_freem(chk->data);
3891 				chk->data = NULL;
3892 			}
3893 		}
3894 		sctp_free_a_chunk(stcb, chk, so_locked);
3895 		/* sa_ignore FREED_MEMORY */
3896 	}
3897 	for (i = 0; i < asoc->streamoutcnt; i++) {
3898 		/* For each stream */
3899 		outs = &asoc->strmout[i];
3900 		/* clean up any sends there */
3901 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3902 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
3903 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3904 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
3905 			sctp_free_spbufspace(stcb, asoc, sp);
3906 			if (sp->data) {
3907 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3908 				    error, (void *)sp, so_locked);
3909 				if (sp->data) {
3910 					sctp_m_freem(sp->data);
3911 					sp->data = NULL;
3912 					sp->tail_mbuf = NULL;
3913 					sp->length = 0;
3914 				}
3915 			}
3916 			if (sp->net) {
3917 				sctp_free_remote_addr(sp->net);
3918 				sp->net = NULL;
3919 			}
3920 			/* Free the chunk */
3921 			sctp_free_a_strmoq(stcb, sp, so_locked);
3922 			/* sa_ignore FREED_MEMORY */
3923 		}
3924 	}
3925 
3926 	if (holds_lock == 0) {
3927 		SCTP_TCB_SEND_UNLOCK(stcb);
3928 	}
3929 }
3930 
3931 void
3932 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3933     struct sctp_abort_chunk *abort, int so_locked
3934 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3935     SCTP_UNUSED
3936 #endif
3937 )
3938 {
3939 	if (stcb == NULL) {
3940 		return;
3941 	}
3942 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3943 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3944 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3945 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3946 	}
3947 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3948 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3949 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3950 		return;
3951 	}
3952 	/* Tell them we lost the asoc */
3953 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3954 	if (from_peer) {
3955 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3956 	} else {
3957 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3958 	}
3959 }
3960 
3961 void
3962 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3963     struct mbuf *m, int iphlen,
3964     struct sockaddr *src, struct sockaddr *dst,
3965     struct sctphdr *sh, struct mbuf *op_err,
3966     uint8_t mflowtype, uint32_t mflowid,
3967     uint32_t vrf_id, uint16_t port)
3968 {
3969 	uint32_t vtag;
3970 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3971 	struct socket *so;
3972 #endif
3973 
3974 	vtag = 0;
3975 	if (stcb != NULL) {
3976 		vtag = stcb->asoc.peer_vtag;
3977 		vrf_id = stcb->asoc.vrf_id;
3978 	}
3979 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
3980 	    mflowtype, mflowid, inp->fibnum,
3981 	    vrf_id, port);
3982 	if (stcb != NULL) {
3983 		/* We have a TCB to abort, send notification too */
3984 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
3985 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3986 		/* Ok, now lets free it */
3987 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3988 		so = SCTP_INP_SO(inp);
3989 		atomic_add_int(&stcb->asoc.refcnt, 1);
3990 		SCTP_TCB_UNLOCK(stcb);
3991 		SCTP_SOCKET_LOCK(so, 1);
3992 		SCTP_TCB_LOCK(stcb);
3993 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3994 #endif
3995 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3996 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3997 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3998 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3999 		}
4000 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4001 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
4002 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4003 		SCTP_SOCKET_UNLOCK(so, 1);
4004 #endif
4005 	}
4006 }
4007 #ifdef SCTP_ASOCLOG_OF_TSNS
4008 void
4009 sctp_print_out_track_log(struct sctp_tcb *stcb)
4010 {
4011 #ifdef NOSIY_PRINTS
4012 	int i;
4013 
4014 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4015 	SCTP_PRINTF("IN bound TSN log-aaa\n");
4016 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4017 		SCTP_PRINTF("None rcvd\n");
4018 		goto none_in;
4019 	}
4020 	if (stcb->asoc.tsn_in_wrapped) {
4021 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4022 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4023 			    stcb->asoc.in_tsnlog[i].tsn,
4024 			    stcb->asoc.in_tsnlog[i].strm,
4025 			    stcb->asoc.in_tsnlog[i].seq,
4026 			    stcb->asoc.in_tsnlog[i].flgs,
4027 			    stcb->asoc.in_tsnlog[i].sz);
4028 		}
4029 	}
4030 	if (stcb->asoc.tsn_in_at) {
4031 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4032 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4033 			    stcb->asoc.in_tsnlog[i].tsn,
4034 			    stcb->asoc.in_tsnlog[i].strm,
4035 			    stcb->asoc.in_tsnlog[i].seq,
4036 			    stcb->asoc.in_tsnlog[i].flgs,
4037 			    stcb->asoc.in_tsnlog[i].sz);
4038 		}
4039 	}
4040 none_in:
4041 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4042 	if ((stcb->asoc.tsn_out_at == 0) &&
4043 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4044 		SCTP_PRINTF("None sent\n");
4045 	}
4046 	if (stcb->asoc.tsn_out_wrapped) {
4047 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4048 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4049 			    stcb->asoc.out_tsnlog[i].tsn,
4050 			    stcb->asoc.out_tsnlog[i].strm,
4051 			    stcb->asoc.out_tsnlog[i].seq,
4052 			    stcb->asoc.out_tsnlog[i].flgs,
4053 			    stcb->asoc.out_tsnlog[i].sz);
4054 		}
4055 	}
4056 	if (stcb->asoc.tsn_out_at) {
4057 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4058 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4059 			    stcb->asoc.out_tsnlog[i].tsn,
4060 			    stcb->asoc.out_tsnlog[i].strm,
4061 			    stcb->asoc.out_tsnlog[i].seq,
4062 			    stcb->asoc.out_tsnlog[i].flgs,
4063 			    stcb->asoc.out_tsnlog[i].sz);
4064 		}
4065 	}
4066 #endif
4067 }
4068 #endif
4069 
4070 void
4071 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4072     struct mbuf *op_err,
4073     int so_locked
4074 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4075     SCTP_UNUSED
4076 #endif
4077 )
4078 {
4079 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4080 	struct socket *so;
4081 #endif
4082 
4083 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4084 	so = SCTP_INP_SO(inp);
4085 #endif
4086 	if (stcb == NULL) {
4087 		/* Got to have a TCB */
4088 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4089 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4090 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4091 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4092 			}
4093 		}
4094 		return;
4095 	} else {
4096 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4097 	}
4098 	/* notify the peer */
4099 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4100 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4101 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4102 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4103 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4104 	}
4105 	/* notify the ulp */
4106 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4107 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4108 	}
4109 	/* now free the asoc */
4110 #ifdef SCTP_ASOCLOG_OF_TSNS
4111 	sctp_print_out_track_log(stcb);
4112 #endif
4113 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4114 	if (!so_locked) {
4115 		atomic_add_int(&stcb->asoc.refcnt, 1);
4116 		SCTP_TCB_UNLOCK(stcb);
4117 		SCTP_SOCKET_LOCK(so, 1);
4118 		SCTP_TCB_LOCK(stcb);
4119 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4120 	}
4121 #endif
4122 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4123 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4124 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4125 	if (!so_locked) {
4126 		SCTP_SOCKET_UNLOCK(so, 1);
4127 	}
4128 #endif
4129 }
4130 
4131 void
4132 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4133     struct sockaddr *src, struct sockaddr *dst,
4134     struct sctphdr *sh, struct sctp_inpcb *inp,
4135     struct mbuf *cause,
4136     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4137     uint32_t vrf_id, uint16_t port)
4138 {
4139 	struct sctp_chunkhdr *ch, chunk_buf;
4140 	unsigned int chk_length;
4141 	int contains_init_chunk;
4142 
4143 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4144 	/* Generate a TO address for future reference */
4145 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4146 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4147 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4148 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4149 		}
4150 	}
4151 	contains_init_chunk = 0;
4152 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4153 	    sizeof(*ch), (uint8_t *)&chunk_buf);
4154 	while (ch != NULL) {
4155 		chk_length = ntohs(ch->chunk_length);
4156 		if (chk_length < sizeof(*ch)) {
4157 			/* break to abort land */
4158 			break;
4159 		}
4160 		switch (ch->chunk_type) {
4161 		case SCTP_INIT:
4162 			contains_init_chunk = 1;
4163 			break;
4164 		case SCTP_PACKET_DROPPED:
4165 			/* we don't respond to pkt-dropped */
4166 			return;
4167 		case SCTP_ABORT_ASSOCIATION:
4168 			/* we don't respond with an ABORT to an ABORT */
4169 			return;
4170 		case SCTP_SHUTDOWN_COMPLETE:
4171 			/*
4172 			 * we ignore it since we are not waiting for it and
4173 			 * peer is gone
4174 			 */
4175 			return;
4176 		case SCTP_SHUTDOWN_ACK:
4177 			sctp_send_shutdown_complete2(src, dst, sh,
4178 			    mflowtype, mflowid, fibnum,
4179 			    vrf_id, port);
4180 			return;
4181 		default:
4182 			break;
4183 		}
4184 		offset += SCTP_SIZE32(chk_length);
4185 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4186 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4187 	}
4188 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4189 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4190 	    (contains_init_chunk == 0))) {
4191 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4192 		    mflowtype, mflowid, fibnum,
4193 		    vrf_id, port);
4194 	}
4195 }
4196 
4197 /*
4198  * check the inbound datagram to make sure there is not an abort inside it,
4199  * if there is return 1, else return 0.
4200  */
4201 int
4202 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4203 {
4204 	struct sctp_chunkhdr *ch;
4205 	struct sctp_init_chunk *init_chk, chunk_buf;
4206 	int offset;
4207 	unsigned int chk_length;
4208 
4209 	offset = iphlen + sizeof(struct sctphdr);
4210 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4211 	    (uint8_t *)&chunk_buf);
4212 	while (ch != NULL) {
4213 		chk_length = ntohs(ch->chunk_length);
4214 		if (chk_length < sizeof(*ch)) {
4215 			/* packet is probably corrupt */
4216 			break;
4217 		}
4218 		/* we seem to be ok, is it an abort? */
4219 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4220 			/* yep, tell them */
4221 			return (1);
4222 		}
4223 		if (ch->chunk_type == SCTP_INITIATION) {
4224 			/* need to update the Vtag */
4225 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4226 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4227 			if (init_chk != NULL) {
4228 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4229 			}
4230 		}
4231 		/* Nope, move to the next chunk */
4232 		offset += SCTP_SIZE32(chk_length);
4233 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4234 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4235 	}
4236 	return (0);
4237 }
4238 
4239 /*
4240  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4241  * set (i.e. it's 0) so, create this function to compare link local scopes
4242  */
4243 #ifdef INET6
4244 uint32_t
4245 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4246 {
4247 	struct sockaddr_in6 a, b;
4248 
4249 	/* save copies */
4250 	a = *addr1;
4251 	b = *addr2;
4252 
4253 	if (a.sin6_scope_id == 0)
4254 		if (sa6_recoverscope(&a)) {
4255 			/* can't get scope, so can't match */
4256 			return (0);
4257 		}
4258 	if (b.sin6_scope_id == 0)
4259 		if (sa6_recoverscope(&b)) {
4260 			/* can't get scope, so can't match */
4261 			return (0);
4262 		}
4263 	if (a.sin6_scope_id != b.sin6_scope_id)
4264 		return (0);
4265 
4266 	return (1);
4267 }
4268 
4269 /*
4270  * returns a sockaddr_in6 with embedded scope recovered and removed
4271  */
4272 struct sockaddr_in6 *
4273 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4274 {
4275 	/* check and strip embedded scope junk */
4276 	if (addr->sin6_family == AF_INET6) {
4277 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4278 			if (addr->sin6_scope_id == 0) {
4279 				*store = *addr;
4280 				if (!sa6_recoverscope(store)) {
4281 					/* use the recovered scope */
4282 					addr = store;
4283 				}
4284 			} else {
4285 				/* else, return the original "to" addr */
4286 				in6_clearscope(&addr->sin6_addr);
4287 			}
4288 		}
4289 	}
4290 	return (addr);
4291 }
4292 #endif
4293 
4294 /*
4295  * are the two addresses the same?  currently a "scopeless" check returns: 1
4296  * if same, 0 if not
4297  */
4298 int
4299 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4300 {
4301 
4302 	/* must be valid */
4303 	if (sa1 == NULL || sa2 == NULL)
4304 		return (0);
4305 
4306 	/* must be the same family */
4307 	if (sa1->sa_family != sa2->sa_family)
4308 		return (0);
4309 
4310 	switch (sa1->sa_family) {
4311 #ifdef INET6
4312 	case AF_INET6:
4313 		{
4314 			/* IPv6 addresses */
4315 			struct sockaddr_in6 *sin6_1, *sin6_2;
4316 
4317 			sin6_1 = (struct sockaddr_in6 *)sa1;
4318 			sin6_2 = (struct sockaddr_in6 *)sa2;
4319 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4320 			    sin6_2));
4321 		}
4322 #endif
4323 #ifdef INET
4324 	case AF_INET:
4325 		{
4326 			/* IPv4 addresses */
4327 			struct sockaddr_in *sin_1, *sin_2;
4328 
4329 			sin_1 = (struct sockaddr_in *)sa1;
4330 			sin_2 = (struct sockaddr_in *)sa2;
4331 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4332 		}
4333 #endif
4334 	default:
4335 		/* we don't do these... */
4336 		return (0);
4337 	}
4338 }
4339 
4340 void
4341 sctp_print_address(struct sockaddr *sa)
4342 {
4343 #ifdef INET6
4344 	char ip6buf[INET6_ADDRSTRLEN];
4345 #endif
4346 
4347 	switch (sa->sa_family) {
4348 #ifdef INET6
4349 	case AF_INET6:
4350 		{
4351 			struct sockaddr_in6 *sin6;
4352 
4353 			sin6 = (struct sockaddr_in6 *)sa;
4354 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4355 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4356 			    ntohs(sin6->sin6_port),
4357 			    sin6->sin6_scope_id);
4358 			break;
4359 		}
4360 #endif
4361 #ifdef INET
4362 	case AF_INET:
4363 		{
4364 			struct sockaddr_in *sin;
4365 			unsigned char *p;
4366 
4367 			sin = (struct sockaddr_in *)sa;
4368 			p = (unsigned char *)&sin->sin_addr;
4369 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4370 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4371 			break;
4372 		}
4373 #endif
4374 	default:
4375 		SCTP_PRINTF("?\n");
4376 		break;
4377 	}
4378 }
4379 
4380 void
4381 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4382     struct sctp_inpcb *new_inp,
4383     struct sctp_tcb *stcb,
4384     int waitflags)
4385 {
4386 	/*
4387 	 * go through our old INP and pull off any control structures that
4388 	 * belong to stcb and move then to the new inp.
4389 	 */
4390 	struct socket *old_so, *new_so;
4391 	struct sctp_queued_to_read *control, *nctl;
4392 	struct sctp_readhead tmp_queue;
4393 	struct mbuf *m;
4394 	int error = 0;
4395 
4396 	old_so = old_inp->sctp_socket;
4397 	new_so = new_inp->sctp_socket;
4398 	TAILQ_INIT(&tmp_queue);
4399 	error = sblock(&old_so->so_rcv, waitflags);
4400 	if (error) {
4401 		/*
4402 		 * Gak, can't get sblock, we have a problem. data will be
4403 		 * left stranded.. and we don't dare look at it since the
4404 		 * other thread may be reading something. Oh well, its a
4405 		 * screwed up app that does a peeloff OR a accept while
4406 		 * reading from the main socket... actually its only the
4407 		 * peeloff() case, since I think read will fail on a
4408 		 * listening socket..
4409 		 */
4410 		return;
4411 	}
4412 	/* lock the socket buffers */
4413 	SCTP_INP_READ_LOCK(old_inp);
4414 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4415 		/* Pull off all for out target stcb */
4416 		if (control->stcb == stcb) {
4417 			/* remove it we want it */
4418 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4419 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4420 			m = control->data;
4421 			while (m) {
4422 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4423 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4424 				}
4425 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4426 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4427 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4428 				}
4429 				m = SCTP_BUF_NEXT(m);
4430 			}
4431 		}
4432 	}
4433 	SCTP_INP_READ_UNLOCK(old_inp);
4434 	/* Remove the sb-lock on the old socket */
4435 
4436 	sbunlock(&old_so->so_rcv);
4437 	/* Now we move them over to the new socket buffer */
4438 	SCTP_INP_READ_LOCK(new_inp);
4439 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4440 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4441 		m = control->data;
4442 		while (m) {
4443 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4444 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4445 			}
4446 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4447 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4448 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4449 			}
4450 			m = SCTP_BUF_NEXT(m);
4451 		}
4452 	}
4453 	SCTP_INP_READ_UNLOCK(new_inp);
4454 }
4455 
4456 void
4457 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4458     struct sctp_tcb *stcb,
4459     int so_locked
4460 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4461     SCTP_UNUSED
4462 #endif
4463 )
4464 {
4465 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4466 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4467 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4468 		} else {
4469 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4470 			struct socket *so;
4471 
4472 			so = SCTP_INP_SO(inp);
4473 			if (!so_locked) {
4474 				if (stcb) {
4475 					atomic_add_int(&stcb->asoc.refcnt, 1);
4476 					SCTP_TCB_UNLOCK(stcb);
4477 				}
4478 				SCTP_SOCKET_LOCK(so, 1);
4479 				if (stcb) {
4480 					SCTP_TCB_LOCK(stcb);
4481 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4482 				}
4483 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4484 					SCTP_SOCKET_UNLOCK(so, 1);
4485 					return;
4486 				}
4487 			}
4488 #endif
4489 			sctp_sorwakeup(inp, inp->sctp_socket);
4490 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4491 			if (!so_locked) {
4492 				SCTP_SOCKET_UNLOCK(so, 1);
4493 			}
4494 #endif
4495 		}
4496 	}
4497 }
4498 
4499 void
4500 sctp_add_to_readq(struct sctp_inpcb *inp,
4501     struct sctp_tcb *stcb,
4502     struct sctp_queued_to_read *control,
4503     struct sockbuf *sb,
4504     int end,
4505     int inp_read_lock_held,
4506     int so_locked
4507 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4508     SCTP_UNUSED
4509 #endif
4510 )
4511 {
4512 	/*
4513 	 * Here we must place the control on the end of the socket read
4514 	 * queue AND increment sb_cc so that select will work properly on
4515 	 * read.
4516 	 */
4517 	struct mbuf *m, *prev = NULL;
4518 
4519 	if (inp == NULL) {
4520 		/* Gak, TSNH!! */
4521 #ifdef INVARIANTS
4522 		panic("Gak, inp NULL on add_to_readq");
4523 #endif
4524 		return;
4525 	}
4526 	if (inp_read_lock_held == 0)
4527 		SCTP_INP_READ_LOCK(inp);
4528 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4529 		sctp_free_remote_addr(control->whoFrom);
4530 		if (control->data) {
4531 			sctp_m_freem(control->data);
4532 			control->data = NULL;
4533 		}
4534 		sctp_free_a_readq(stcb, control);
4535 		if (inp_read_lock_held == 0)
4536 			SCTP_INP_READ_UNLOCK(inp);
4537 		return;
4538 	}
4539 	if (!(control->spec_flags & M_NOTIFICATION)) {
4540 		atomic_add_int(&inp->total_recvs, 1);
4541 		if (!control->do_not_ref_stcb) {
4542 			atomic_add_int(&stcb->total_recvs, 1);
4543 		}
4544 	}
4545 	m = control->data;
4546 	control->held_length = 0;
4547 	control->length = 0;
4548 	while (m) {
4549 		if (SCTP_BUF_LEN(m) == 0) {
4550 			/* Skip mbufs with NO length */
4551 			if (prev == NULL) {
4552 				/* First one */
4553 				control->data = sctp_m_free(m);
4554 				m = control->data;
4555 			} else {
4556 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4557 				m = SCTP_BUF_NEXT(prev);
4558 			}
4559 			if (m == NULL) {
4560 				control->tail_mbuf = prev;
4561 			}
4562 			continue;
4563 		}
4564 		prev = m;
4565 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4566 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4567 		}
4568 		sctp_sballoc(stcb, sb, m);
4569 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4570 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4571 		}
4572 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4573 		m = SCTP_BUF_NEXT(m);
4574 	}
4575 	if (prev != NULL) {
4576 		control->tail_mbuf = prev;
4577 	} else {
4578 		/* Everything got collapsed out?? */
4579 		sctp_free_remote_addr(control->whoFrom);
4580 		sctp_free_a_readq(stcb, control);
4581 		if (inp_read_lock_held == 0)
4582 			SCTP_INP_READ_UNLOCK(inp);
4583 		return;
4584 	}
4585 	if (end) {
4586 		control->end_added = 1;
4587 	}
4588 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4589 	control->on_read_q = 1;
4590 	if (inp_read_lock_held == 0)
4591 		SCTP_INP_READ_UNLOCK(inp);
4592 	if (inp && inp->sctp_socket) {
4593 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
4594 	}
4595 }
4596 
4597 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4598  *************ALTERNATE ROUTING CODE
4599  */
4600 
4601 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4602  *************ALTERNATE ROUTING CODE
4603  */
4604 
4605 struct mbuf *
4606 sctp_generate_cause(uint16_t code, char *info)
4607 {
4608 	struct mbuf *m;
4609 	struct sctp_gen_error_cause *cause;
4610 	size_t info_len;
4611 	uint16_t len;
4612 
4613 	if ((code == 0) || (info == NULL)) {
4614 		return (NULL);
4615 	}
4616 	info_len = strlen(info);
4617 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4618 		return (NULL);
4619 	}
4620 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4621 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4622 	if (m != NULL) {
4623 		SCTP_BUF_LEN(m) = len;
4624 		cause = mtod(m, struct sctp_gen_error_cause *);
4625 		cause->code = htons(code);
4626 		cause->length = htons(len);
4627 		memcpy(cause->info, info, info_len);
4628 	}
4629 	return (m);
4630 }
4631 
4632 struct mbuf *
4633 sctp_generate_no_user_data_cause(uint32_t tsn)
4634 {
4635 	struct mbuf *m;
4636 	struct sctp_error_no_user_data *no_user_data_cause;
4637 	uint16_t len;
4638 
4639 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4640 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4641 	if (m != NULL) {
4642 		SCTP_BUF_LEN(m) = len;
4643 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4644 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4645 		no_user_data_cause->cause.length = htons(len);
4646 		no_user_data_cause->tsn = htonl(tsn);
4647 	}
4648 	return (m);
4649 }
4650 
4651 #ifdef SCTP_MBCNT_LOGGING
4652 void
4653 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4654     struct sctp_tmit_chunk *tp1, int chk_cnt)
4655 {
4656 	if (tp1->data == NULL) {
4657 		return;
4658 	}
4659 	asoc->chunks_on_out_queue -= chk_cnt;
4660 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4661 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4662 		    asoc->total_output_queue_size,
4663 		    tp1->book_size,
4664 		    0,
4665 		    tp1->mbcnt);
4666 	}
4667 	if (asoc->total_output_queue_size >= tp1->book_size) {
4668 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4669 	} else {
4670 		asoc->total_output_queue_size = 0;
4671 	}
4672 
4673 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4674 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4675 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4676 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4677 		} else {
4678 			stcb->sctp_socket->so_snd.sb_cc = 0;
4679 
4680 		}
4681 	}
4682 }
4683 
4684 #endif
4685 
4686 int
4687 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4688     uint8_t sent, int so_locked
4689 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4690     SCTP_UNUSED
4691 #endif
4692 )
4693 {
4694 	struct sctp_stream_out *strq;
4695 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4696 	struct sctp_stream_queue_pending *sp;
4697 	uint32_t mid;
4698 	uint16_t sid;
4699 	uint8_t foundeom = 0;
4700 	int ret_sz = 0;
4701 	int notdone;
4702 	int do_wakeup_routine = 0;
4703 
4704 	sid = tp1->rec.data.sid;
4705 	mid = tp1->rec.data.mid;
4706 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4707 		stcb->asoc.abandoned_sent[0]++;
4708 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4709 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
4710 #if defined(SCTP_DETAILED_STR_STATS)
4711 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4712 #endif
4713 	} else {
4714 		stcb->asoc.abandoned_unsent[0]++;
4715 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4716 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4717 #if defined(SCTP_DETAILED_STR_STATS)
4718 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4719 #endif
4720 	}
4721 	do {
4722 		ret_sz += tp1->book_size;
4723 		if (tp1->data != NULL) {
4724 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4725 				sctp_flight_size_decrease(tp1);
4726 				sctp_total_flight_decrease(stcb, tp1);
4727 			}
4728 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4729 			stcb->asoc.peers_rwnd += tp1->send_size;
4730 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4731 			if (sent) {
4732 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4733 			} else {
4734 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4735 			}
4736 			if (tp1->data) {
4737 				sctp_m_freem(tp1->data);
4738 				tp1->data = NULL;
4739 			}
4740 			do_wakeup_routine = 1;
4741 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4742 				stcb->asoc.sent_queue_cnt_removeable--;
4743 			}
4744 		}
4745 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4746 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4747 		    SCTP_DATA_NOT_FRAG) {
4748 			/* not frag'ed we ae done   */
4749 			notdone = 0;
4750 			foundeom = 1;
4751 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4752 			/* end of frag, we are done */
4753 			notdone = 0;
4754 			foundeom = 1;
4755 		} else {
4756 			/*
4757 			 * Its a begin or middle piece, we must mark all of
4758 			 * it
4759 			 */
4760 			notdone = 1;
4761 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4762 		}
4763 	} while (tp1 && notdone);
4764 	if (foundeom == 0) {
4765 		/*
4766 		 * The multi-part message was scattered across the send and
4767 		 * sent queue.
4768 		 */
4769 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4770 			if ((tp1->rec.data.sid != sid) ||
4771 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4772 				break;
4773 			}
4774 			/*
4775 			 * save to chk in case we have some on stream out
4776 			 * queue. If so and we have an un-transmitted one we
4777 			 * don't have to fudge the TSN.
4778 			 */
4779 			chk = tp1;
4780 			ret_sz += tp1->book_size;
4781 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4782 			if (sent) {
4783 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4784 			} else {
4785 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4786 			}
4787 			if (tp1->data) {
4788 				sctp_m_freem(tp1->data);
4789 				tp1->data = NULL;
4790 			}
4791 			/* No flight involved here book the size to 0 */
4792 			tp1->book_size = 0;
4793 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4794 				foundeom = 1;
4795 			}
4796 			do_wakeup_routine = 1;
4797 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4798 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4799 			/*
4800 			 * on to the sent queue so we can wait for it to be
4801 			 * passed by.
4802 			 */
4803 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4804 			    sctp_next);
4805 			stcb->asoc.send_queue_cnt--;
4806 			stcb->asoc.sent_queue_cnt++;
4807 		}
4808 	}
4809 	if (foundeom == 0) {
4810 		/*
4811 		 * Still no eom found. That means there is stuff left on the
4812 		 * stream out queue.. yuck.
4813 		 */
4814 		SCTP_TCB_SEND_LOCK(stcb);
4815 		strq = &stcb->asoc.strmout[sid];
4816 		sp = TAILQ_FIRST(&strq->outqueue);
4817 		if (sp != NULL) {
4818 			sp->discard_rest = 1;
4819 			/*
4820 			 * We may need to put a chunk on the queue that
4821 			 * holds the TSN that would have been sent with the
4822 			 * LAST bit.
4823 			 */
4824 			if (chk == NULL) {
4825 				/* Yep, we have to */
4826 				sctp_alloc_a_chunk(stcb, chk);
4827 				if (chk == NULL) {
4828 					/*
4829 					 * we are hosed. All we can do is
4830 					 * nothing.. which will cause an
4831 					 * abort if the peer is paying
4832 					 * attention.
4833 					 */
4834 					goto oh_well;
4835 				}
4836 				memset(chk, 0, sizeof(*chk));
4837 				chk->rec.data.rcv_flags = 0;
4838 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4839 				chk->asoc = &stcb->asoc;
4840 				if (stcb->asoc.idata_supported == 0) {
4841 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4842 						chk->rec.data.mid = 0;
4843 					} else {
4844 						chk->rec.data.mid = strq->next_mid_ordered;
4845 					}
4846 				} else {
4847 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4848 						chk->rec.data.mid = strq->next_mid_unordered;
4849 					} else {
4850 						chk->rec.data.mid = strq->next_mid_ordered;
4851 					}
4852 				}
4853 				chk->rec.data.sid = sp->sid;
4854 				chk->rec.data.ppid = sp->ppid;
4855 				chk->rec.data.context = sp->context;
4856 				chk->flags = sp->act_flags;
4857 				chk->whoTo = NULL;
4858 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4859 				strq->chunks_on_queues++;
4860 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4861 				stcb->asoc.sent_queue_cnt++;
4862 				stcb->asoc.pr_sctp_cnt++;
4863 			}
4864 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4865 			if (sp->sinfo_flags & SCTP_UNORDERED) {
4866 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4867 			}
4868 			if (stcb->asoc.idata_supported == 0) {
4869 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4870 					strq->next_mid_ordered++;
4871 				}
4872 			} else {
4873 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4874 					strq->next_mid_unordered++;
4875 				} else {
4876 					strq->next_mid_ordered++;
4877 				}
4878 			}
4879 	oh_well:
4880 			if (sp->data) {
4881 				/*
4882 				 * Pull any data to free up the SB and allow
4883 				 * sender to "add more" while we will throw
4884 				 * away :-)
4885 				 */
4886 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4887 				ret_sz += sp->length;
4888 				do_wakeup_routine = 1;
4889 				sp->some_taken = 1;
4890 				sctp_m_freem(sp->data);
4891 				sp->data = NULL;
4892 				sp->tail_mbuf = NULL;
4893 				sp->length = 0;
4894 			}
4895 		}
4896 		SCTP_TCB_SEND_UNLOCK(stcb);
4897 	}
4898 	if (do_wakeup_routine) {
4899 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4900 		struct socket *so;
4901 
4902 		so = SCTP_INP_SO(stcb->sctp_ep);
4903 		if (!so_locked) {
4904 			atomic_add_int(&stcb->asoc.refcnt, 1);
4905 			SCTP_TCB_UNLOCK(stcb);
4906 			SCTP_SOCKET_LOCK(so, 1);
4907 			SCTP_TCB_LOCK(stcb);
4908 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4909 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4910 				/* assoc was freed while we were unlocked */
4911 				SCTP_SOCKET_UNLOCK(so, 1);
4912 				return (ret_sz);
4913 			}
4914 		}
4915 #endif
4916 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4917 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4918 		if (!so_locked) {
4919 			SCTP_SOCKET_UNLOCK(so, 1);
4920 		}
4921 #endif
4922 	}
4923 	return (ret_sz);
4924 }
4925 
4926 /*
4927  * checks to see if the given address, sa, is one that is currently known by
4928  * the kernel note: can't distinguish the same address on multiple interfaces
4929  * and doesn't handle multiple addresses with different zone/scope id's note:
4930  * ifa_ifwithaddr() compares the entire sockaddr struct
4931  */
4932 struct sctp_ifa *
4933 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4934     int holds_lock)
4935 {
4936 	struct sctp_laddr *laddr;
4937 
4938 	if (holds_lock == 0) {
4939 		SCTP_INP_RLOCK(inp);
4940 	}
4941 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4942 		if (laddr->ifa == NULL)
4943 			continue;
4944 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4945 			continue;
4946 #ifdef INET
4947 		if (addr->sa_family == AF_INET) {
4948 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4949 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4950 				/* found him. */
4951 				if (holds_lock == 0) {
4952 					SCTP_INP_RUNLOCK(inp);
4953 				}
4954 				return (laddr->ifa);
4955 				break;
4956 			}
4957 		}
4958 #endif
4959 #ifdef INET6
4960 		if (addr->sa_family == AF_INET6) {
4961 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4962 			    &laddr->ifa->address.sin6)) {
4963 				/* found him. */
4964 				if (holds_lock == 0) {
4965 					SCTP_INP_RUNLOCK(inp);
4966 				}
4967 				return (laddr->ifa);
4968 				break;
4969 			}
4970 		}
4971 #endif
4972 	}
4973 	if (holds_lock == 0) {
4974 		SCTP_INP_RUNLOCK(inp);
4975 	}
4976 	return (NULL);
4977 }
4978 
4979 uint32_t
4980 sctp_get_ifa_hash_val(struct sockaddr *addr)
4981 {
4982 	switch (addr->sa_family) {
4983 #ifdef INET
4984 	case AF_INET:
4985 		{
4986 			struct sockaddr_in *sin;
4987 
4988 			sin = (struct sockaddr_in *)addr;
4989 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4990 		}
4991 #endif
4992 #ifdef INET6
4993 	case AF_INET6:
4994 		{
4995 			struct sockaddr_in6 *sin6;
4996 			uint32_t hash_of_addr;
4997 
4998 			sin6 = (struct sockaddr_in6 *)addr;
4999 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5000 			    sin6->sin6_addr.s6_addr32[1] +
5001 			    sin6->sin6_addr.s6_addr32[2] +
5002 			    sin6->sin6_addr.s6_addr32[3]);
5003 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5004 			return (hash_of_addr);
5005 		}
5006 #endif
5007 	default:
5008 		break;
5009 	}
5010 	return (0);
5011 }
5012 
5013 struct sctp_ifa *
5014 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5015 {
5016 	struct sctp_ifa *sctp_ifap;
5017 	struct sctp_vrf *vrf;
5018 	struct sctp_ifalist *hash_head;
5019 	uint32_t hash_of_addr;
5020 
5021 	if (holds_lock == 0)
5022 		SCTP_IPI_ADDR_RLOCK();
5023 
5024 	vrf = sctp_find_vrf(vrf_id);
5025 	if (vrf == NULL) {
5026 		if (holds_lock == 0)
5027 			SCTP_IPI_ADDR_RUNLOCK();
5028 		return (NULL);
5029 	}
5030 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5031 
5032 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5033 	if (hash_head == NULL) {
5034 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5035 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5036 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5037 		sctp_print_address(addr);
5038 		SCTP_PRINTF("No such bucket for address\n");
5039 		if (holds_lock == 0)
5040 			SCTP_IPI_ADDR_RUNLOCK();
5041 
5042 		return (NULL);
5043 	}
5044 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5045 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5046 			continue;
5047 #ifdef INET
5048 		if (addr->sa_family == AF_INET) {
5049 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5050 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5051 				/* found him. */
5052 				if (holds_lock == 0)
5053 					SCTP_IPI_ADDR_RUNLOCK();
5054 				return (sctp_ifap);
5055 				break;
5056 			}
5057 		}
5058 #endif
5059 #ifdef INET6
5060 		if (addr->sa_family == AF_INET6) {
5061 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5062 			    &sctp_ifap->address.sin6)) {
5063 				/* found him. */
5064 				if (holds_lock == 0)
5065 					SCTP_IPI_ADDR_RUNLOCK();
5066 				return (sctp_ifap);
5067 				break;
5068 			}
5069 		}
5070 #endif
5071 	}
5072 	if (holds_lock == 0)
5073 		SCTP_IPI_ADDR_RUNLOCK();
5074 	return (NULL);
5075 }
5076 
5077 static void
5078 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
5079     uint32_t rwnd_req)
5080 {
5081 	/* User pulled some data, do we need a rwnd update? */
5082 	int r_unlocked = 0;
5083 	uint32_t dif, rwnd;
5084 	struct socket *so = NULL;
5085 
5086 	if (stcb == NULL)
5087 		return;
5088 
5089 	atomic_add_int(&stcb->asoc.refcnt, 1);
5090 
5091 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5092 	    SCTP_STATE_SHUTDOWN_RECEIVED |
5093 	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5094 		/* Pre-check If we are freeing no update */
5095 		goto no_lock;
5096 	}
5097 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5098 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5099 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5100 		goto out;
5101 	}
5102 	so = stcb->sctp_socket;
5103 	if (so == NULL) {
5104 		goto out;
5105 	}
5106 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5107 	/* Have you have freed enough to look */
5108 	*freed_so_far = 0;
5109 	/* Yep, its worth a look and the lock overhead */
5110 
5111 	/* Figure out what the rwnd would be */
5112 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5113 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5114 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5115 	} else {
5116 		dif = 0;
5117 	}
5118 	if (dif >= rwnd_req) {
5119 		if (hold_rlock) {
5120 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5121 			r_unlocked = 1;
5122 		}
5123 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5124 			/*
5125 			 * One last check before we allow the guy possibly
5126 			 * to get in. There is a race, where the guy has not
5127 			 * reached the gate. In that case
5128 			 */
5129 			goto out;
5130 		}
5131 		SCTP_TCB_LOCK(stcb);
5132 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5133 			/* No reports here */
5134 			SCTP_TCB_UNLOCK(stcb);
5135 			goto out;
5136 		}
5137 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5138 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5139 
5140 		sctp_chunk_output(stcb->sctp_ep, stcb,
5141 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5142 		/* make sure no timer is running */
5143 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5144 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5145 		SCTP_TCB_UNLOCK(stcb);
5146 	} else {
5147 		/* Update how much we have pending */
5148 		stcb->freed_by_sorcv_sincelast = dif;
5149 	}
5150 out:
5151 	if (so && r_unlocked && hold_rlock) {
5152 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5153 	}
5154 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5155 no_lock:
5156 	atomic_add_int(&stcb->asoc.refcnt, -1);
5157 	return;
5158 }
5159 
5160 int
5161 sctp_sorecvmsg(struct socket *so,
5162     struct uio *uio,
5163     struct mbuf **mp,
5164     struct sockaddr *from,
5165     int fromlen,
5166     int *msg_flags,
5167     struct sctp_sndrcvinfo *sinfo,
5168     int filling_sinfo)
5169 {
5170 	/*
5171 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5172 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5173 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5174 	 * On the way out we may send out any combination of:
5175 	 * MSG_NOTIFICATION MSG_EOR
5176 	 *
5177 	 */
5178 	struct sctp_inpcb *inp = NULL;
5179 	int my_len = 0;
5180 	int cp_len = 0, error = 0;
5181 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5182 	struct mbuf *m = NULL;
5183 	struct sctp_tcb *stcb = NULL;
5184 	int wakeup_read_socket = 0;
5185 	int freecnt_applied = 0;
5186 	int out_flags = 0, in_flags = 0;
5187 	int block_allowed = 1;
5188 	uint32_t freed_so_far = 0;
5189 	uint32_t copied_so_far = 0;
5190 	int in_eeor_mode = 0;
5191 	int no_rcv_needed = 0;
5192 	uint32_t rwnd_req = 0;
5193 	int hold_sblock = 0;
5194 	int hold_rlock = 0;
5195 	ssize_t slen = 0;
5196 	uint32_t held_length = 0;
5197 	int sockbuf_lock = 0;
5198 
5199 	if (uio == NULL) {
5200 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5201 		return (EINVAL);
5202 	}
5203 	if (msg_flags) {
5204 		in_flags = *msg_flags;
5205 		if (in_flags & MSG_PEEK)
5206 			SCTP_STAT_INCR(sctps_read_peeks);
5207 	} else {
5208 		in_flags = 0;
5209 	}
5210 	slen = uio->uio_resid;
5211 
5212 	/* Pull in and set up our int flags */
5213 	if (in_flags & MSG_OOB) {
5214 		/* Out of band's NOT supported */
5215 		return (EOPNOTSUPP);
5216 	}
5217 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5218 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5219 		return (EINVAL);
5220 	}
5221 	if ((in_flags & (MSG_DONTWAIT
5222 	    | MSG_NBIO
5223 	    )) ||
5224 	    SCTP_SO_IS_NBIO(so)) {
5225 		block_allowed = 0;
5226 	}
5227 	/* setup the endpoint */
5228 	inp = (struct sctp_inpcb *)so->so_pcb;
5229 	if (inp == NULL) {
5230 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5231 		return (EFAULT);
5232 	}
5233 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5234 	/* Must be at least a MTU's worth */
5235 	if (rwnd_req < SCTP_MIN_RWND)
5236 		rwnd_req = SCTP_MIN_RWND;
5237 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5238 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5239 		sctp_misc_ints(SCTP_SORECV_ENTER,
5240 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5241 	}
5242 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5243 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5244 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5245 	}
5246 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5247 	if (error) {
5248 		goto release_unlocked;
5249 	}
5250 	sockbuf_lock = 1;
5251 restart:
5252 
5253 
5254 restart_nosblocks:
5255 	if (hold_sblock == 0) {
5256 		SOCKBUF_LOCK(&so->so_rcv);
5257 		hold_sblock = 1;
5258 	}
5259 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5260 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5261 		goto out;
5262 	}
5263 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5264 		if (so->so_error) {
5265 			error = so->so_error;
5266 			if ((in_flags & MSG_PEEK) == 0)
5267 				so->so_error = 0;
5268 			goto out;
5269 		} else {
5270 			if (so->so_rcv.sb_cc == 0) {
5271 				/* indicate EOF */
5272 				error = 0;
5273 				goto out;
5274 			}
5275 		}
5276 	}
5277 	if (so->so_rcv.sb_cc <= held_length) {
5278 		if (so->so_error) {
5279 			error = so->so_error;
5280 			if ((in_flags & MSG_PEEK) == 0) {
5281 				so->so_error = 0;
5282 			}
5283 			goto out;
5284 		}
5285 		if ((so->so_rcv.sb_cc == 0) &&
5286 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5287 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5288 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5289 				/*
5290 				 * For active open side clear flags for
5291 				 * re-use passive open is blocked by
5292 				 * connect.
5293 				 */
5294 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5295 					/*
5296 					 * You were aborted, passive side
5297 					 * always hits here
5298 					 */
5299 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5300 					error = ECONNRESET;
5301 				}
5302 				so->so_state &= ~(SS_ISCONNECTING |
5303 				    SS_ISDISCONNECTING |
5304 				    SS_ISCONFIRMING |
5305 				    SS_ISCONNECTED);
5306 				if (error == 0) {
5307 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5308 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5309 						error = ENOTCONN;
5310 					}
5311 				}
5312 				goto out;
5313 			}
5314 		}
5315 		if (block_allowed) {
5316 			error = sbwait(&so->so_rcv);
5317 			if (error) {
5318 				goto out;
5319 			}
5320 			held_length = 0;
5321 			goto restart_nosblocks;
5322 		} else {
5323 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5324 			error = EWOULDBLOCK;
5325 			goto out;
5326 		}
5327 	}
5328 	if (hold_sblock == 1) {
5329 		SOCKBUF_UNLOCK(&so->so_rcv);
5330 		hold_sblock = 0;
5331 	}
5332 	/* we possibly have data we can read */
5333 	/* sa_ignore FREED_MEMORY */
5334 	control = TAILQ_FIRST(&inp->read_queue);
5335 	if (control == NULL) {
5336 		/*
5337 		 * This could be happening since the appender did the
5338 		 * increment but as not yet did the tailq insert onto the
5339 		 * read_queue
5340 		 */
5341 		if (hold_rlock == 0) {
5342 			SCTP_INP_READ_LOCK(inp);
5343 		}
5344 		control = TAILQ_FIRST(&inp->read_queue);
5345 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5346 #ifdef INVARIANTS
5347 			panic("Huh, its non zero and nothing on control?");
5348 #endif
5349 			so->so_rcv.sb_cc = 0;
5350 		}
5351 		SCTP_INP_READ_UNLOCK(inp);
5352 		hold_rlock = 0;
5353 		goto restart;
5354 	}
5355 	if ((control->length == 0) &&
5356 	    (control->do_not_ref_stcb)) {
5357 		/*
5358 		 * Clean up code for freeing assoc that left behind a
5359 		 * pdapi.. maybe a peer in EEOR that just closed after
5360 		 * sending and never indicated a EOR.
5361 		 */
5362 		if (hold_rlock == 0) {
5363 			hold_rlock = 1;
5364 			SCTP_INP_READ_LOCK(inp);
5365 		}
5366 		control->held_length = 0;
5367 		if (control->data) {
5368 			/* Hmm there is data here .. fix */
5369 			struct mbuf *m_tmp;
5370 			int cnt = 0;
5371 
5372 			m_tmp = control->data;
5373 			while (m_tmp) {
5374 				cnt += SCTP_BUF_LEN(m_tmp);
5375 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5376 					control->tail_mbuf = m_tmp;
5377 					control->end_added = 1;
5378 				}
5379 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5380 			}
5381 			control->length = cnt;
5382 		} else {
5383 			/* remove it */
5384 			TAILQ_REMOVE(&inp->read_queue, control, next);
5385 			/* Add back any hiddend data */
5386 			sctp_free_remote_addr(control->whoFrom);
5387 			sctp_free_a_readq(stcb, control);
5388 		}
5389 		if (hold_rlock) {
5390 			hold_rlock = 0;
5391 			SCTP_INP_READ_UNLOCK(inp);
5392 		}
5393 		goto restart;
5394 	}
5395 	if ((control->length == 0) &&
5396 	    (control->end_added == 1)) {
5397 		/*
5398 		 * Do we also need to check for (control->pdapi_aborted ==
5399 		 * 1)?
5400 		 */
5401 		if (hold_rlock == 0) {
5402 			hold_rlock = 1;
5403 			SCTP_INP_READ_LOCK(inp);
5404 		}
5405 		TAILQ_REMOVE(&inp->read_queue, control, next);
5406 		if (control->data) {
5407 #ifdef INVARIANTS
5408 			panic("control->data not null but control->length == 0");
5409 #else
5410 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5411 			sctp_m_freem(control->data);
5412 			control->data = NULL;
5413 #endif
5414 		}
5415 		if (control->aux_data) {
5416 			sctp_m_free(control->aux_data);
5417 			control->aux_data = NULL;
5418 		}
5419 #ifdef INVARIANTS
5420 		if (control->on_strm_q) {
5421 			panic("About to free ctl:%p so:%p and its in %d",
5422 			    control, so, control->on_strm_q);
5423 		}
5424 #endif
5425 		sctp_free_remote_addr(control->whoFrom);
5426 		sctp_free_a_readq(stcb, control);
5427 		if (hold_rlock) {
5428 			hold_rlock = 0;
5429 			SCTP_INP_READ_UNLOCK(inp);
5430 		}
5431 		goto restart;
5432 	}
5433 	if (control->length == 0) {
5434 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5435 		    (filling_sinfo)) {
5436 			/* find a more suitable one then this */
5437 			ctl = TAILQ_NEXT(control, next);
5438 			while (ctl) {
5439 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5440 				    (ctl->some_taken ||
5441 				    (ctl->spec_flags & M_NOTIFICATION) ||
5442 				    ((ctl->do_not_ref_stcb == 0) &&
5443 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5444 				    ) {
5445 					/*-
5446 					 * If we have a different TCB next, and there is data
5447 					 * present. If we have already taken some (pdapi), OR we can
5448 					 * ref the tcb and no delivery as started on this stream, we
5449 					 * take it. Note we allow a notification on a different
5450 					 * assoc to be delivered..
5451 					 */
5452 					control = ctl;
5453 					goto found_one;
5454 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5455 					    (ctl->length) &&
5456 					    ((ctl->some_taken) ||
5457 					    ((ctl->do_not_ref_stcb == 0) &&
5458 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5459 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5460 					/*-
5461 					 * If we have the same tcb, and there is data present, and we
5462 					 * have the strm interleave feature present. Then if we have
5463 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5464 					 * not started a delivery for this stream, we can take it.
5465 					 * Note we do NOT allow a notificaiton on the same assoc to
5466 					 * be delivered.
5467 					 */
5468 					control = ctl;
5469 					goto found_one;
5470 				}
5471 				ctl = TAILQ_NEXT(ctl, next);
5472 			}
5473 		}
5474 		/*
5475 		 * if we reach here, not suitable replacement is available
5476 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5477 		 * into the our held count, and its time to sleep again.
5478 		 */
5479 		held_length = so->so_rcv.sb_cc;
5480 		control->held_length = so->so_rcv.sb_cc;
5481 		goto restart;
5482 	}
5483 	/* Clear the held length since there is something to read */
5484 	control->held_length = 0;
5485 found_one:
5486 	/*
5487 	 * If we reach here, control has a some data for us to read off.
5488 	 * Note that stcb COULD be NULL.
5489 	 */
5490 	if (hold_rlock == 0) {
5491 		hold_rlock = 1;
5492 		SCTP_INP_READ_LOCK(inp);
5493 	}
5494 	control->some_taken++;
5495 	stcb = control->stcb;
5496 	if (stcb) {
5497 		if ((control->do_not_ref_stcb == 0) &&
5498 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5499 			if (freecnt_applied == 0)
5500 				stcb = NULL;
5501 		} else if (control->do_not_ref_stcb == 0) {
5502 			/* you can't free it on me please */
5503 			/*
5504 			 * The lock on the socket buffer protects us so the
5505 			 * free code will stop. But since we used the
5506 			 * socketbuf lock and the sender uses the tcb_lock
5507 			 * to increment, we need to use the atomic add to
5508 			 * the refcnt
5509 			 */
5510 			if (freecnt_applied) {
5511 #ifdef INVARIANTS
5512 				panic("refcnt already incremented");
5513 #else
5514 				SCTP_PRINTF("refcnt already incremented?\n");
5515 #endif
5516 			} else {
5517 				atomic_add_int(&stcb->asoc.refcnt, 1);
5518 				freecnt_applied = 1;
5519 			}
5520 			/*
5521 			 * Setup to remember how much we have not yet told
5522 			 * the peer our rwnd has opened up. Note we grab the
5523 			 * value from the tcb from last time. Note too that
5524 			 * sack sending clears this when a sack is sent,
5525 			 * which is fine. Once we hit the rwnd_req, we then
5526 			 * will go to the sctp_user_rcvd() that will not
5527 			 * lock until it KNOWs it MUST send a WUP-SACK.
5528 			 */
5529 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5530 			stcb->freed_by_sorcv_sincelast = 0;
5531 		}
5532 	}
5533 	if (stcb &&
5534 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5535 	    control->do_not_ref_stcb == 0) {
5536 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5537 	}
5538 	/* First lets get off the sinfo and sockaddr info */
5539 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5540 		sinfo->sinfo_stream = control->sinfo_stream;
5541 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5542 		sinfo->sinfo_flags = control->sinfo_flags;
5543 		sinfo->sinfo_ppid = control->sinfo_ppid;
5544 		sinfo->sinfo_context = control->sinfo_context;
5545 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5546 		sinfo->sinfo_tsn = control->sinfo_tsn;
5547 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5548 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5549 		nxt = TAILQ_NEXT(control, next);
5550 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5551 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5552 			struct sctp_extrcvinfo *s_extra;
5553 
5554 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5555 			if ((nxt) &&
5556 			    (nxt->length)) {
5557 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5558 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5559 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5560 				}
5561 				if (nxt->spec_flags & M_NOTIFICATION) {
5562 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5563 				}
5564 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5565 				s_extra->serinfo_next_length = nxt->length;
5566 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5567 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5568 				if (nxt->tail_mbuf != NULL) {
5569 					if (nxt->end_added) {
5570 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5571 					}
5572 				}
5573 			} else {
5574 				/*
5575 				 * we explicitly 0 this, since the memcpy
5576 				 * got some other things beyond the older
5577 				 * sinfo_ that is on the control's structure
5578 				 * :-D
5579 				 */
5580 				nxt = NULL;
5581 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5582 				s_extra->serinfo_next_aid = 0;
5583 				s_extra->serinfo_next_length = 0;
5584 				s_extra->serinfo_next_ppid = 0;
5585 				s_extra->serinfo_next_stream = 0;
5586 			}
5587 		}
5588 		/*
5589 		 * update off the real current cum-ack, if we have an stcb.
5590 		 */
5591 		if ((control->do_not_ref_stcb == 0) && stcb)
5592 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5593 		/*
5594 		 * mask off the high bits, we keep the actual chunk bits in
5595 		 * there.
5596 		 */
5597 		sinfo->sinfo_flags &= 0x00ff;
5598 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5599 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5600 		}
5601 	}
5602 #ifdef SCTP_ASOCLOG_OF_TSNS
5603 	{
5604 		int index, newindex;
5605 		struct sctp_pcbtsn_rlog *entry;
5606 
5607 		do {
5608 			index = inp->readlog_index;
5609 			newindex = index + 1;
5610 			if (newindex >= SCTP_READ_LOG_SIZE) {
5611 				newindex = 0;
5612 			}
5613 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5614 		entry = &inp->readlog[index];
5615 		entry->vtag = control->sinfo_assoc_id;
5616 		entry->strm = control->sinfo_stream;
5617 		entry->seq = (uint16_t)control->mid;
5618 		entry->sz = control->length;
5619 		entry->flgs = control->sinfo_flags;
5620 	}
5621 #endif
5622 	if ((fromlen > 0) && (from != NULL)) {
5623 		union sctp_sockstore store;
5624 		size_t len;
5625 
5626 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5627 #ifdef INET6
5628 		case AF_INET6:
5629 			len = sizeof(struct sockaddr_in6);
5630 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5631 			store.sin6.sin6_port = control->port_from;
5632 			break;
5633 #endif
5634 #ifdef INET
5635 		case AF_INET:
5636 #ifdef INET6
5637 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5638 				len = sizeof(struct sockaddr_in6);
5639 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5640 				    &store.sin6);
5641 				store.sin6.sin6_port = control->port_from;
5642 			} else {
5643 				len = sizeof(struct sockaddr_in);
5644 				store.sin = control->whoFrom->ro._l_addr.sin;
5645 				store.sin.sin_port = control->port_from;
5646 			}
5647 #else
5648 			len = sizeof(struct sockaddr_in);
5649 			store.sin = control->whoFrom->ro._l_addr.sin;
5650 			store.sin.sin_port = control->port_from;
5651 #endif
5652 			break;
5653 #endif
5654 		default:
5655 			len = 0;
5656 			break;
5657 		}
5658 		memcpy(from, &store, min((size_t)fromlen, len));
5659 #ifdef INET6
5660 		{
5661 			struct sockaddr_in6 lsa6, *from6;
5662 
5663 			from6 = (struct sockaddr_in6 *)from;
5664 			sctp_recover_scope_mac(from6, (&lsa6));
5665 		}
5666 #endif
5667 	}
5668 	if (hold_rlock) {
5669 		SCTP_INP_READ_UNLOCK(inp);
5670 		hold_rlock = 0;
5671 	}
5672 	if (hold_sblock) {
5673 		SOCKBUF_UNLOCK(&so->so_rcv);
5674 		hold_sblock = 0;
5675 	}
5676 	/* now copy out what data we can */
5677 	if (mp == NULL) {
5678 		/* copy out each mbuf in the chain up to length */
5679 get_more_data:
5680 		m = control->data;
5681 		while (m) {
5682 			/* Move out all we can */
5683 			cp_len = (int)uio->uio_resid;
5684 			my_len = (int)SCTP_BUF_LEN(m);
5685 			if (cp_len > my_len) {
5686 				/* not enough in this buf */
5687 				cp_len = my_len;
5688 			}
5689 			if (hold_rlock) {
5690 				SCTP_INP_READ_UNLOCK(inp);
5691 				hold_rlock = 0;
5692 			}
5693 			if (cp_len > 0)
5694 				error = uiomove(mtod(m, char *), cp_len, uio);
5695 			/* re-read */
5696 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5697 				goto release;
5698 			}
5699 			if ((control->do_not_ref_stcb == 0) && stcb &&
5700 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5701 				no_rcv_needed = 1;
5702 			}
5703 			if (error) {
5704 				/* error we are out of here */
5705 				goto release;
5706 			}
5707 			SCTP_INP_READ_LOCK(inp);
5708 			hold_rlock = 1;
5709 			if (cp_len == SCTP_BUF_LEN(m)) {
5710 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5711 				    (control->end_added)) {
5712 					out_flags |= MSG_EOR;
5713 					if ((control->do_not_ref_stcb == 0) &&
5714 					    (control->stcb != NULL) &&
5715 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5716 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5717 				}
5718 				if (control->spec_flags & M_NOTIFICATION) {
5719 					out_flags |= MSG_NOTIFICATION;
5720 				}
5721 				/* we ate up the mbuf */
5722 				if (in_flags & MSG_PEEK) {
5723 					/* just looking */
5724 					m = SCTP_BUF_NEXT(m);
5725 					copied_so_far += cp_len;
5726 				} else {
5727 					/* dispose of the mbuf */
5728 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5729 						sctp_sblog(&so->so_rcv,
5730 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5731 					}
5732 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5733 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5734 						sctp_sblog(&so->so_rcv,
5735 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5736 					}
5737 					copied_so_far += cp_len;
5738 					freed_so_far += cp_len;
5739 					freed_so_far += MSIZE;
5740 					atomic_subtract_int(&control->length, cp_len);
5741 					control->data = sctp_m_free(m);
5742 					m = control->data;
5743 					/*
5744 					 * been through it all, must hold sb
5745 					 * lock ok to null tail
5746 					 */
5747 					if (control->data == NULL) {
5748 #ifdef INVARIANTS
5749 						if ((control->end_added == 0) ||
5750 						    (TAILQ_NEXT(control, next) == NULL)) {
5751 							/*
5752 							 * If the end is not
5753 							 * added, OR the
5754 							 * next is NOT null
5755 							 * we MUST have the
5756 							 * lock.
5757 							 */
5758 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5759 								panic("Hmm we don't own the lock?");
5760 							}
5761 						}
5762 #endif
5763 						control->tail_mbuf = NULL;
5764 #ifdef INVARIANTS
5765 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5766 							panic("end_added, nothing left and no MSG_EOR");
5767 						}
5768 #endif
5769 					}
5770 				}
5771 			} else {
5772 				/* Do we need to trim the mbuf? */
5773 				if (control->spec_flags & M_NOTIFICATION) {
5774 					out_flags |= MSG_NOTIFICATION;
5775 				}
5776 				if ((in_flags & MSG_PEEK) == 0) {
5777 					SCTP_BUF_RESV_UF(m, cp_len);
5778 					SCTP_BUF_LEN(m) -= cp_len;
5779 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5780 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5781 					}
5782 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5783 					if ((control->do_not_ref_stcb == 0) &&
5784 					    stcb) {
5785 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5786 					}
5787 					copied_so_far += cp_len;
5788 					freed_so_far += cp_len;
5789 					freed_so_far += MSIZE;
5790 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5791 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5792 						    SCTP_LOG_SBRESULT, 0);
5793 					}
5794 					atomic_subtract_int(&control->length, cp_len);
5795 				} else {
5796 					copied_so_far += cp_len;
5797 				}
5798 			}
5799 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5800 				break;
5801 			}
5802 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5803 			    (control->do_not_ref_stcb == 0) &&
5804 			    (freed_so_far >= rwnd_req)) {
5805 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5806 			}
5807 		}		/* end while(m) */
5808 		/*
5809 		 * At this point we have looked at it all and we either have
5810 		 * a MSG_EOR/or read all the user wants... <OR>
5811 		 * control->length == 0.
5812 		 */
5813 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5814 			/* we are done with this control */
5815 			if (control->length == 0) {
5816 				if (control->data) {
5817 #ifdef INVARIANTS
5818 					panic("control->data not null at read eor?");
5819 #else
5820 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5821 					sctp_m_freem(control->data);
5822 					control->data = NULL;
5823 #endif
5824 				}
5825 		done_with_control:
5826 				if (hold_rlock == 0) {
5827 					SCTP_INP_READ_LOCK(inp);
5828 					hold_rlock = 1;
5829 				}
5830 				TAILQ_REMOVE(&inp->read_queue, control, next);
5831 				/* Add back any hiddend data */
5832 				if (control->held_length) {
5833 					held_length = 0;
5834 					control->held_length = 0;
5835 					wakeup_read_socket = 1;
5836 				}
5837 				if (control->aux_data) {
5838 					sctp_m_free(control->aux_data);
5839 					control->aux_data = NULL;
5840 				}
5841 				no_rcv_needed = control->do_not_ref_stcb;
5842 				sctp_free_remote_addr(control->whoFrom);
5843 				control->data = NULL;
5844 #ifdef INVARIANTS
5845 				if (control->on_strm_q) {
5846 					panic("About to free ctl:%p so:%p and its in %d",
5847 					    control, so, control->on_strm_q);
5848 				}
5849 #endif
5850 				sctp_free_a_readq(stcb, control);
5851 				control = NULL;
5852 				if ((freed_so_far >= rwnd_req) &&
5853 				    (no_rcv_needed == 0))
5854 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5855 
5856 			} else {
5857 				/*
5858 				 * The user did not read all of this
5859 				 * message, turn off the returned MSG_EOR
5860 				 * since we are leaving more behind on the
5861 				 * control to read.
5862 				 */
5863 #ifdef INVARIANTS
5864 				if (control->end_added &&
5865 				    (control->data == NULL) &&
5866 				    (control->tail_mbuf == NULL)) {
5867 					panic("Gak, control->length is corrupt?");
5868 				}
5869 #endif
5870 				no_rcv_needed = control->do_not_ref_stcb;
5871 				out_flags &= ~MSG_EOR;
5872 			}
5873 		}
5874 		if (out_flags & MSG_EOR) {
5875 			goto release;
5876 		}
5877 		if ((uio->uio_resid == 0) ||
5878 		    ((in_eeor_mode) &&
5879 		    (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
5880 			goto release;
5881 		}
5882 		/*
5883 		 * If I hit here the receiver wants more and this message is
5884 		 * NOT done (pd-api). So two questions. Can we block? if not
5885 		 * we are done. Did the user NOT set MSG_WAITALL?
5886 		 */
5887 		if (block_allowed == 0) {
5888 			goto release;
5889 		}
5890 		/*
5891 		 * We need to wait for more data a few things: - We don't
5892 		 * sbunlock() so we don't get someone else reading. - We
5893 		 * must be sure to account for the case where what is added
5894 		 * is NOT to our control when we wakeup.
5895 		 */
5896 
5897 		/*
5898 		 * Do we need to tell the transport a rwnd update might be
5899 		 * needed before we go to sleep?
5900 		 */
5901 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5902 		    ((freed_so_far >= rwnd_req) &&
5903 		    (control->do_not_ref_stcb == 0) &&
5904 		    (no_rcv_needed == 0))) {
5905 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5906 		}
5907 wait_some_more:
5908 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5909 			goto release;
5910 		}
5911 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5912 			goto release;
5913 
5914 		if (hold_rlock == 1) {
5915 			SCTP_INP_READ_UNLOCK(inp);
5916 			hold_rlock = 0;
5917 		}
5918 		if (hold_sblock == 0) {
5919 			SOCKBUF_LOCK(&so->so_rcv);
5920 			hold_sblock = 1;
5921 		}
5922 		if ((copied_so_far) && (control->length == 0) &&
5923 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5924 			goto release;
5925 		}
5926 		if (so->so_rcv.sb_cc <= control->held_length) {
5927 			error = sbwait(&so->so_rcv);
5928 			if (error) {
5929 				goto release;
5930 			}
5931 			control->held_length = 0;
5932 		}
5933 		if (hold_sblock) {
5934 			SOCKBUF_UNLOCK(&so->so_rcv);
5935 			hold_sblock = 0;
5936 		}
5937 		if (control->length == 0) {
5938 			/* still nothing here */
5939 			if (control->end_added == 1) {
5940 				/* he aborted, or is done i.e.did a shutdown */
5941 				out_flags |= MSG_EOR;
5942 				if (control->pdapi_aborted) {
5943 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5944 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5945 
5946 					out_flags |= MSG_TRUNC;
5947 				} else {
5948 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5949 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5950 				}
5951 				goto done_with_control;
5952 			}
5953 			if (so->so_rcv.sb_cc > held_length) {
5954 				control->held_length = so->so_rcv.sb_cc;
5955 				held_length = 0;
5956 			}
5957 			goto wait_some_more;
5958 		} else if (control->data == NULL) {
5959 			/*
5960 			 * we must re-sync since data is probably being
5961 			 * added
5962 			 */
5963 			SCTP_INP_READ_LOCK(inp);
5964 			if ((control->length > 0) && (control->data == NULL)) {
5965 				/*
5966 				 * big trouble.. we have the lock and its
5967 				 * corrupt?
5968 				 */
5969 #ifdef INVARIANTS
5970 				panic("Impossible data==NULL length !=0");
5971 #endif
5972 				out_flags |= MSG_EOR;
5973 				out_flags |= MSG_TRUNC;
5974 				control->length = 0;
5975 				SCTP_INP_READ_UNLOCK(inp);
5976 				goto done_with_control;
5977 			}
5978 			SCTP_INP_READ_UNLOCK(inp);
5979 			/* We will fall around to get more data */
5980 		}
5981 		goto get_more_data;
5982 	} else {
5983 		/*-
5984 		 * Give caller back the mbuf chain,
5985 		 * store in uio_resid the length
5986 		 */
5987 		wakeup_read_socket = 0;
5988 		if ((control->end_added == 0) ||
5989 		    (TAILQ_NEXT(control, next) == NULL)) {
5990 			/* Need to get rlock */
5991 			if (hold_rlock == 0) {
5992 				SCTP_INP_READ_LOCK(inp);
5993 				hold_rlock = 1;
5994 			}
5995 		}
5996 		if (control->end_added) {
5997 			out_flags |= MSG_EOR;
5998 			if ((control->do_not_ref_stcb == 0) &&
5999 			    (control->stcb != NULL) &&
6000 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6001 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6002 		}
6003 		if (control->spec_flags & M_NOTIFICATION) {
6004 			out_flags |= MSG_NOTIFICATION;
6005 		}
6006 		uio->uio_resid = control->length;
6007 		*mp = control->data;
6008 		m = control->data;
6009 		while (m) {
6010 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6011 				sctp_sblog(&so->so_rcv,
6012 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6013 			}
6014 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6015 			freed_so_far += SCTP_BUF_LEN(m);
6016 			freed_so_far += MSIZE;
6017 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6018 				sctp_sblog(&so->so_rcv,
6019 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6020 			}
6021 			m = SCTP_BUF_NEXT(m);
6022 		}
6023 		control->data = control->tail_mbuf = NULL;
6024 		control->length = 0;
6025 		if (out_flags & MSG_EOR) {
6026 			/* Done with this control */
6027 			goto done_with_control;
6028 		}
6029 	}
6030 release:
6031 	if (hold_rlock == 1) {
6032 		SCTP_INP_READ_UNLOCK(inp);
6033 		hold_rlock = 0;
6034 	}
6035 	if (hold_sblock == 1) {
6036 		SOCKBUF_UNLOCK(&so->so_rcv);
6037 		hold_sblock = 0;
6038 	}
6039 	sbunlock(&so->so_rcv);
6040 	sockbuf_lock = 0;
6041 
6042 release_unlocked:
6043 	if (hold_sblock) {
6044 		SOCKBUF_UNLOCK(&so->so_rcv);
6045 		hold_sblock = 0;
6046 	}
6047 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6048 		if ((freed_so_far >= rwnd_req) &&
6049 		    (control && (control->do_not_ref_stcb == 0)) &&
6050 		    (no_rcv_needed == 0))
6051 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6052 	}
6053 out:
6054 	if (msg_flags) {
6055 		*msg_flags = out_flags;
6056 	}
6057 	if (((out_flags & MSG_EOR) == 0) &&
6058 	    ((in_flags & MSG_PEEK) == 0) &&
6059 	    (sinfo) &&
6060 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6061 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6062 		struct sctp_extrcvinfo *s_extra;
6063 
6064 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6065 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6066 	}
6067 	if (hold_rlock == 1) {
6068 		SCTP_INP_READ_UNLOCK(inp);
6069 	}
6070 	if (hold_sblock) {
6071 		SOCKBUF_UNLOCK(&so->so_rcv);
6072 	}
6073 	if (sockbuf_lock) {
6074 		sbunlock(&so->so_rcv);
6075 	}
6076 	if (freecnt_applied) {
6077 		/*
6078 		 * The lock on the socket buffer protects us so the free
6079 		 * code will stop. But since we used the socketbuf lock and
6080 		 * the sender uses the tcb_lock to increment, we need to use
6081 		 * the atomic add to the refcnt.
6082 		 */
6083 		if (stcb == NULL) {
6084 #ifdef INVARIANTS
6085 			panic("stcb for refcnt has gone NULL?");
6086 			goto stage_left;
6087 #else
6088 			goto stage_left;
6089 #endif
6090 		}
6091 		/* Save the value back for next time */
6092 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6093 		atomic_add_int(&stcb->asoc.refcnt, -1);
6094 	}
6095 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6096 		if (stcb) {
6097 			sctp_misc_ints(SCTP_SORECV_DONE,
6098 			    freed_so_far,
6099 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6100 			    stcb->asoc.my_rwnd,
6101 			    so->so_rcv.sb_cc);
6102 		} else {
6103 			sctp_misc_ints(SCTP_SORECV_DONE,
6104 			    freed_so_far,
6105 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6106 			    0,
6107 			    so->so_rcv.sb_cc);
6108 		}
6109 	}
6110 stage_left:
6111 	if (wakeup_read_socket) {
6112 		sctp_sorwakeup(inp, so);
6113 	}
6114 	return (error);
6115 }
6116 
6117 
6118 #ifdef SCTP_MBUF_LOGGING
6119 struct mbuf *
6120 sctp_m_free(struct mbuf *m)
6121 {
6122 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6123 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6124 	}
6125 	return (m_free(m));
6126 }
6127 
6128 void
6129 sctp_m_freem(struct mbuf *mb)
6130 {
6131 	while (mb != NULL)
6132 		mb = sctp_m_free(mb);
6133 }
6134 
6135 #endif
6136 
6137 int
6138 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6139 {
6140 	/*
6141 	 * Given a local address. For all associations that holds the
6142 	 * address, request a peer-set-primary.
6143 	 */
6144 	struct sctp_ifa *ifa;
6145 	struct sctp_laddr *wi;
6146 
6147 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6148 	if (ifa == NULL) {
6149 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6150 		return (EADDRNOTAVAIL);
6151 	}
6152 	/*
6153 	 * Now that we have the ifa we must awaken the iterator with this
6154 	 * message.
6155 	 */
6156 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6157 	if (wi == NULL) {
6158 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6159 		return (ENOMEM);
6160 	}
6161 	/* Now incr the count and int wi structure */
6162 	SCTP_INCR_LADDR_COUNT();
6163 	bzero(wi, sizeof(*wi));
6164 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6165 	wi->ifa = ifa;
6166 	wi->action = SCTP_SET_PRIM_ADDR;
6167 	atomic_add_int(&ifa->refcount, 1);
6168 
6169 	/* Now add it to the work queue */
6170 	SCTP_WQ_ADDR_LOCK();
6171 	/*
6172 	 * Should this really be a tailq? As it is we will process the
6173 	 * newest first :-0
6174 	 */
6175 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6176 	SCTP_WQ_ADDR_UNLOCK();
6177 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6178 	    (struct sctp_inpcb *)NULL,
6179 	    (struct sctp_tcb *)NULL,
6180 	    (struct sctp_nets *)NULL);
6181 	return (0);
6182 }
6183 
6184 
6185 int
6186 sctp_soreceive(struct socket *so,
6187     struct sockaddr **psa,
6188     struct uio *uio,
6189     struct mbuf **mp0,
6190     struct mbuf **controlp,
6191     int *flagsp)
6192 {
6193 	int error, fromlen;
6194 	uint8_t sockbuf[256];
6195 	struct sockaddr *from;
6196 	struct sctp_extrcvinfo sinfo;
6197 	int filling_sinfo = 1;
6198 	struct sctp_inpcb *inp;
6199 
6200 	inp = (struct sctp_inpcb *)so->so_pcb;
6201 	/* pickup the assoc we are reading from */
6202 	if (inp == NULL) {
6203 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6204 		return (EINVAL);
6205 	}
6206 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6207 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6208 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6209 	    (controlp == NULL)) {
6210 		/* user does not want the sndrcv ctl */
6211 		filling_sinfo = 0;
6212 	}
6213 	if (psa) {
6214 		from = (struct sockaddr *)sockbuf;
6215 		fromlen = sizeof(sockbuf);
6216 		from->sa_len = 0;
6217 	} else {
6218 		from = NULL;
6219 		fromlen = 0;
6220 	}
6221 
6222 	if (filling_sinfo) {
6223 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6224 	}
6225 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6226 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6227 	if (controlp != NULL) {
6228 		/* copy back the sinfo in a CMSG format */
6229 		if (filling_sinfo)
6230 			*controlp = sctp_build_ctl_nchunk(inp,
6231 			    (struct sctp_sndrcvinfo *)&sinfo);
6232 		else
6233 			*controlp = NULL;
6234 	}
6235 	if (psa) {
6236 		/* copy back the address info */
6237 		if (from && from->sa_len) {
6238 			*psa = sodupsockaddr(from, M_NOWAIT);
6239 		} else {
6240 			*psa = NULL;
6241 		}
6242 	}
6243 	return (error);
6244 }
6245 
6246 
6247 
6248 
6249 
6250 int
6251 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6252     int totaddr, int *error)
6253 {
6254 	int added = 0;
6255 	int i;
6256 	struct sctp_inpcb *inp;
6257 	struct sockaddr *sa;
6258 	size_t incr = 0;
6259 #ifdef INET
6260 	struct sockaddr_in *sin;
6261 #endif
6262 #ifdef INET6
6263 	struct sockaddr_in6 *sin6;
6264 #endif
6265 
6266 	sa = addr;
6267 	inp = stcb->sctp_ep;
6268 	*error = 0;
6269 	for (i = 0; i < totaddr; i++) {
6270 		switch (sa->sa_family) {
6271 #ifdef INET
6272 		case AF_INET:
6273 			incr = sizeof(struct sockaddr_in);
6274 			sin = (struct sockaddr_in *)sa;
6275 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6276 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6277 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6278 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6279 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6280 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6281 				*error = EINVAL;
6282 				goto out_now;
6283 			}
6284 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6285 			    SCTP_DONOT_SETSCOPE,
6286 			    SCTP_ADDR_IS_CONFIRMED)) {
6287 				/* assoc gone no un-lock */
6288 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6289 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6290 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6291 				*error = ENOBUFS;
6292 				goto out_now;
6293 			}
6294 			added++;
6295 			break;
6296 #endif
6297 #ifdef INET6
6298 		case AF_INET6:
6299 			incr = sizeof(struct sockaddr_in6);
6300 			sin6 = (struct sockaddr_in6 *)sa;
6301 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6302 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6303 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6304 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6305 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6306 				*error = EINVAL;
6307 				goto out_now;
6308 			}
6309 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6310 			    SCTP_DONOT_SETSCOPE,
6311 			    SCTP_ADDR_IS_CONFIRMED)) {
6312 				/* assoc gone no un-lock */
6313 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6314 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6315 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6316 				*error = ENOBUFS;
6317 				goto out_now;
6318 			}
6319 			added++;
6320 			break;
6321 #endif
6322 		default:
6323 			break;
6324 		}
6325 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6326 	}
6327 out_now:
6328 	return (added);
6329 }
6330 
6331 struct sctp_tcb *
6332 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6333     unsigned int *totaddr,
6334     unsigned int *num_v4, unsigned int *num_v6, int *error,
6335     unsigned int limit, int *bad_addr)
6336 {
6337 	struct sockaddr *sa;
6338 	struct sctp_tcb *stcb = NULL;
6339 	unsigned int incr, at, i;
6340 
6341 	at = 0;
6342 	sa = addr;
6343 	*error = *num_v6 = *num_v4 = 0;
6344 	/* account and validate addresses */
6345 	for (i = 0; i < *totaddr; i++) {
6346 		switch (sa->sa_family) {
6347 #ifdef INET
6348 		case AF_INET:
6349 			incr = (unsigned int)sizeof(struct sockaddr_in);
6350 			if (sa->sa_len != incr) {
6351 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6352 				*error = EINVAL;
6353 				*bad_addr = 1;
6354 				return (NULL);
6355 			}
6356 			(*num_v4) += 1;
6357 			break;
6358 #endif
6359 #ifdef INET6
6360 		case AF_INET6:
6361 			{
6362 				struct sockaddr_in6 *sin6;
6363 
6364 				sin6 = (struct sockaddr_in6 *)sa;
6365 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6366 					/* Must be non-mapped for connectx */
6367 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6368 					*error = EINVAL;
6369 					*bad_addr = 1;
6370 					return (NULL);
6371 				}
6372 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6373 				if (sa->sa_len != incr) {
6374 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6375 					*error = EINVAL;
6376 					*bad_addr = 1;
6377 					return (NULL);
6378 				}
6379 				(*num_v6) += 1;
6380 				break;
6381 			}
6382 #endif
6383 		default:
6384 			*totaddr = i;
6385 			incr = 0;
6386 			/* we are done */
6387 			break;
6388 		}
6389 		if (i == *totaddr) {
6390 			break;
6391 		}
6392 		SCTP_INP_INCR_REF(inp);
6393 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6394 		if (stcb != NULL) {
6395 			/* Already have or am bring up an association */
6396 			return (stcb);
6397 		} else {
6398 			SCTP_INP_DECR_REF(inp);
6399 		}
6400 		if ((at + incr) > limit) {
6401 			*totaddr = i;
6402 			break;
6403 		}
6404 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6405 	}
6406 	return ((struct sctp_tcb *)NULL);
6407 }
6408 
6409 /*
6410  * sctp_bindx(ADD) for one address.
6411  * assumes all arguments are valid/checked by caller.
6412  */
6413 void
6414 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6415     struct sockaddr *sa, sctp_assoc_t assoc_id,
6416     uint32_t vrf_id, int *error, void *p)
6417 {
6418 	struct sockaddr *addr_touse;
6419 #if defined(INET) && defined(INET6)
6420 	struct sockaddr_in sin;
6421 #endif
6422 
6423 	/* see if we're bound all already! */
6424 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6425 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6426 		*error = EINVAL;
6427 		return;
6428 	}
6429 	addr_touse = sa;
6430 #ifdef INET6
6431 	if (sa->sa_family == AF_INET6) {
6432 #ifdef INET
6433 		struct sockaddr_in6 *sin6;
6434 
6435 #endif
6436 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6437 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6438 			*error = EINVAL;
6439 			return;
6440 		}
6441 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6442 			/* can only bind v6 on PF_INET6 sockets */
6443 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6444 			*error = EINVAL;
6445 			return;
6446 		}
6447 #ifdef INET
6448 		sin6 = (struct sockaddr_in6 *)addr_touse;
6449 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6450 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6451 			    SCTP_IPV6_V6ONLY(inp)) {
6452 				/* can't bind v4-mapped on PF_INET sockets */
6453 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6454 				*error = EINVAL;
6455 				return;
6456 			}
6457 			in6_sin6_2_sin(&sin, sin6);
6458 			addr_touse = (struct sockaddr *)&sin;
6459 		}
6460 #endif
6461 	}
6462 #endif
6463 #ifdef INET
6464 	if (sa->sa_family == AF_INET) {
6465 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6466 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6467 			*error = EINVAL;
6468 			return;
6469 		}
6470 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6471 		    SCTP_IPV6_V6ONLY(inp)) {
6472 			/* can't bind v4 on PF_INET sockets */
6473 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6474 			*error = EINVAL;
6475 			return;
6476 		}
6477 	}
6478 #endif
6479 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6480 		if (p == NULL) {
6481 			/* Can't get proc for Net/Open BSD */
6482 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6483 			*error = EINVAL;
6484 			return;
6485 		}
6486 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6487 		return;
6488 	}
6489 	/*
6490 	 * No locks required here since bind and mgmt_ep_sa all do their own
6491 	 * locking. If we do something for the FIX: below we may need to
6492 	 * lock in that case.
6493 	 */
6494 	if (assoc_id == 0) {
6495 		/* add the address */
6496 		struct sctp_inpcb *lep;
6497 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6498 
6499 		/* validate the incoming port */
6500 		if ((lsin->sin_port != 0) &&
6501 		    (lsin->sin_port != inp->sctp_lport)) {
6502 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6503 			*error = EINVAL;
6504 			return;
6505 		} else {
6506 			/* user specified 0 port, set it to existing port */
6507 			lsin->sin_port = inp->sctp_lport;
6508 		}
6509 
6510 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6511 		if (lep != NULL) {
6512 			/*
6513 			 * We must decrement the refcount since we have the
6514 			 * ep already and are binding. No remove going on
6515 			 * here.
6516 			 */
6517 			SCTP_INP_DECR_REF(lep);
6518 		}
6519 		if (lep == inp) {
6520 			/* already bound to it.. ok */
6521 			return;
6522 		} else if (lep == NULL) {
6523 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6524 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6525 			    SCTP_ADD_IP_ADDRESS,
6526 			    vrf_id, NULL);
6527 		} else {
6528 			*error = EADDRINUSE;
6529 		}
6530 		if (*error)
6531 			return;
6532 	} else {
6533 		/*
6534 		 * FIX: decide whether we allow assoc based bindx
6535 		 */
6536 	}
6537 }
6538 
6539 /*
6540  * sctp_bindx(DELETE) for one address.
6541  * assumes all arguments are valid/checked by caller.
6542  */
6543 void
6544 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6545     struct sockaddr *sa, sctp_assoc_t assoc_id,
6546     uint32_t vrf_id, int *error)
6547 {
6548 	struct sockaddr *addr_touse;
6549 #if defined(INET) && defined(INET6)
6550 	struct sockaddr_in sin;
6551 #endif
6552 
6553 	/* see if we're bound all already! */
6554 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6555 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6556 		*error = EINVAL;
6557 		return;
6558 	}
6559 	addr_touse = sa;
6560 #ifdef INET6
6561 	if (sa->sa_family == AF_INET6) {
6562 #ifdef INET
6563 		struct sockaddr_in6 *sin6;
6564 #endif
6565 
6566 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6567 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6568 			*error = EINVAL;
6569 			return;
6570 		}
6571 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6572 			/* can only bind v6 on PF_INET6 sockets */
6573 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6574 			*error = EINVAL;
6575 			return;
6576 		}
6577 #ifdef INET
6578 		sin6 = (struct sockaddr_in6 *)addr_touse;
6579 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6580 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6581 			    SCTP_IPV6_V6ONLY(inp)) {
6582 				/* can't bind mapped-v4 on PF_INET sockets */
6583 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6584 				*error = EINVAL;
6585 				return;
6586 			}
6587 			in6_sin6_2_sin(&sin, sin6);
6588 			addr_touse = (struct sockaddr *)&sin;
6589 		}
6590 #endif
6591 	}
6592 #endif
6593 #ifdef INET
6594 	if (sa->sa_family == AF_INET) {
6595 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6596 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6597 			*error = EINVAL;
6598 			return;
6599 		}
6600 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6601 		    SCTP_IPV6_V6ONLY(inp)) {
6602 			/* can't bind v4 on PF_INET sockets */
6603 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6604 			*error = EINVAL;
6605 			return;
6606 		}
6607 	}
6608 #endif
6609 	/*
6610 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6611 	 * below is ever changed we may need to lock before calling
6612 	 * association level binding.
6613 	 */
6614 	if (assoc_id == 0) {
6615 		/* delete the address */
6616 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6617 		    SCTP_DEL_IP_ADDRESS,
6618 		    vrf_id, NULL);
6619 	} else {
6620 		/*
6621 		 * FIX: decide whether we allow assoc based bindx
6622 		 */
6623 	}
6624 }
6625 
6626 /*
6627  * returns the valid local address count for an assoc, taking into account
6628  * all scoping rules
6629  */
6630 int
6631 sctp_local_addr_count(struct sctp_tcb *stcb)
6632 {
6633 	int loopback_scope;
6634 #if defined(INET)
6635 	int ipv4_local_scope, ipv4_addr_legal;
6636 #endif
6637 #if defined (INET6)
6638 	int local_scope, site_scope, ipv6_addr_legal;
6639 #endif
6640 	struct sctp_vrf *vrf;
6641 	struct sctp_ifn *sctp_ifn;
6642 	struct sctp_ifa *sctp_ifa;
6643 	int count = 0;
6644 
6645 	/* Turn on all the appropriate scopes */
6646 	loopback_scope = stcb->asoc.scope.loopback_scope;
6647 #if defined(INET)
6648 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6649 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6650 #endif
6651 #if defined(INET6)
6652 	local_scope = stcb->asoc.scope.local_scope;
6653 	site_scope = stcb->asoc.scope.site_scope;
6654 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6655 #endif
6656 	SCTP_IPI_ADDR_RLOCK();
6657 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6658 	if (vrf == NULL) {
6659 		/* no vrf, no addresses */
6660 		SCTP_IPI_ADDR_RUNLOCK();
6661 		return (0);
6662 	}
6663 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6664 		/*
6665 		 * bound all case: go through all ifns on the vrf
6666 		 */
6667 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6668 			if ((loopback_scope == 0) &&
6669 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6670 				continue;
6671 			}
6672 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6673 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6674 					continue;
6675 				switch (sctp_ifa->address.sa.sa_family) {
6676 #ifdef INET
6677 				case AF_INET:
6678 					if (ipv4_addr_legal) {
6679 						struct sockaddr_in *sin;
6680 
6681 						sin = &sctp_ifa->address.sin;
6682 						if (sin->sin_addr.s_addr == 0) {
6683 							/*
6684 							 * skip unspecified
6685 							 * addrs
6686 							 */
6687 							continue;
6688 						}
6689 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6690 						    &sin->sin_addr) != 0) {
6691 							continue;
6692 						}
6693 						if ((ipv4_local_scope == 0) &&
6694 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6695 							continue;
6696 						}
6697 						/* count this one */
6698 						count++;
6699 					} else {
6700 						continue;
6701 					}
6702 					break;
6703 #endif
6704 #ifdef INET6
6705 				case AF_INET6:
6706 					if (ipv6_addr_legal) {
6707 						struct sockaddr_in6 *sin6;
6708 
6709 						sin6 = &sctp_ifa->address.sin6;
6710 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6711 							continue;
6712 						}
6713 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6714 						    &sin6->sin6_addr) != 0) {
6715 							continue;
6716 						}
6717 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6718 							if (local_scope == 0)
6719 								continue;
6720 							if (sin6->sin6_scope_id == 0) {
6721 								if (sa6_recoverscope(sin6) != 0)
6722 									/*
6723 									 *
6724 									 * bad
6725 									 * link
6726 									 *
6727 									 * local
6728 									 *
6729 									 * address
6730 									 */
6731 									continue;
6732 							}
6733 						}
6734 						if ((site_scope == 0) &&
6735 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6736 							continue;
6737 						}
6738 						/* count this one */
6739 						count++;
6740 					}
6741 					break;
6742 #endif
6743 				default:
6744 					/* TSNH */
6745 					break;
6746 				}
6747 			}
6748 		}
6749 	} else {
6750 		/*
6751 		 * subset bound case
6752 		 */
6753 		struct sctp_laddr *laddr;
6754 
6755 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6756 		    sctp_nxt_addr) {
6757 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6758 				continue;
6759 			}
6760 			/* count this one */
6761 			count++;
6762 		}
6763 	}
6764 	SCTP_IPI_ADDR_RUNLOCK();
6765 	return (count);
6766 }
6767 
6768 #if defined(SCTP_LOCAL_TRACE_BUF)
6769 
6770 void
6771 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6772 {
6773 	uint32_t saveindex, newindex;
6774 
6775 	do {
6776 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6777 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6778 			newindex = 1;
6779 		} else {
6780 			newindex = saveindex + 1;
6781 		}
6782 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6783 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6784 		saveindex = 0;
6785 	}
6786 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6787 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6788 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6789 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6790 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6791 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6792 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6793 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6794 }
6795 
6796 #endif
6797 static void
6798 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6799     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6800 {
6801 	struct ip *iph;
6802 #ifdef INET6
6803 	struct ip6_hdr *ip6;
6804 #endif
6805 	struct mbuf *sp, *last;
6806 	struct udphdr *uhdr;
6807 	uint16_t port;
6808 
6809 	if ((m->m_flags & M_PKTHDR) == 0) {
6810 		/* Can't handle one that is not a pkt hdr */
6811 		goto out;
6812 	}
6813 	/* Pull the src port */
6814 	iph = mtod(m, struct ip *);
6815 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6816 	port = uhdr->uh_sport;
6817 	/*
6818 	 * Split out the mbuf chain. Leave the IP header in m, place the
6819 	 * rest in the sp.
6820 	 */
6821 	sp = m_split(m, off, M_NOWAIT);
6822 	if (sp == NULL) {
6823 		/* Gak, drop packet, we can't do a split */
6824 		goto out;
6825 	}
6826 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6827 		/* Gak, packet can't have an SCTP header in it - too small */
6828 		m_freem(sp);
6829 		goto out;
6830 	}
6831 	/* Now pull up the UDP header and SCTP header together */
6832 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6833 	if (sp == NULL) {
6834 		/* Gak pullup failed */
6835 		goto out;
6836 	}
6837 	/* Trim out the UDP header */
6838 	m_adj(sp, sizeof(struct udphdr));
6839 
6840 	/* Now reconstruct the mbuf chain */
6841 	for (last = m; last->m_next; last = last->m_next);
6842 	last->m_next = sp;
6843 	m->m_pkthdr.len += sp->m_pkthdr.len;
6844 	/*
6845 	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
6846 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
6847 	 * CSUM_SCTP_VALID this would imply that the HW also verified the
6848 	 * SCTP checksum. Therefore, clear the bit.
6849 	 */
6850 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6851 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6852 	    m->m_pkthdr.len,
6853 	    if_name(m->m_pkthdr.rcvif),
6854 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6855 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
6856 	iph = mtod(m, struct ip *);
6857 	switch (iph->ip_v) {
6858 #ifdef INET
6859 	case IPVERSION:
6860 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6861 		sctp_input_with_port(m, off, port);
6862 		break;
6863 #endif
6864 #ifdef INET6
6865 	case IPV6_VERSION >> 4:
6866 		ip6 = mtod(m, struct ip6_hdr *);
6867 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6868 		sctp6_input_with_port(&m, &off, port);
6869 		break;
6870 #endif
6871 	default:
6872 		goto out;
6873 		break;
6874 	}
6875 	return;
6876 out:
6877 	m_freem(m);
6878 }
6879 
6880 #ifdef INET
6881 static void
6882 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
6883 {
6884 	struct ip *outer_ip, *inner_ip;
6885 	struct sctphdr *sh;
6886 	struct icmp *icmp;
6887 	struct udphdr *udp;
6888 	struct sctp_inpcb *inp;
6889 	struct sctp_tcb *stcb;
6890 	struct sctp_nets *net;
6891 	struct sctp_init_chunk *ch;
6892 	struct sockaddr_in src, dst;
6893 	uint8_t type, code;
6894 
6895 	inner_ip = (struct ip *)vip;
6896 	icmp = (struct icmp *)((caddr_t)inner_ip -
6897 	    (sizeof(struct icmp) - sizeof(struct ip)));
6898 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
6899 	if (ntohs(outer_ip->ip_len) <
6900 	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
6901 		return;
6902 	}
6903 	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
6904 	sh = (struct sctphdr *)(udp + 1);
6905 	memset(&src, 0, sizeof(struct sockaddr_in));
6906 	src.sin_family = AF_INET;
6907 	src.sin_len = sizeof(struct sockaddr_in);
6908 	src.sin_port = sh->src_port;
6909 	src.sin_addr = inner_ip->ip_src;
6910 	memset(&dst, 0, sizeof(struct sockaddr_in));
6911 	dst.sin_family = AF_INET;
6912 	dst.sin_len = sizeof(struct sockaddr_in);
6913 	dst.sin_port = sh->dest_port;
6914 	dst.sin_addr = inner_ip->ip_dst;
6915 	/*
6916 	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
6917 	 * holds our local endpoint address. Thus we reverse the dst and the
6918 	 * src in the lookup.
6919 	 */
6920 	inp = NULL;
6921 	net = NULL;
6922 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
6923 	    (struct sockaddr *)&src,
6924 	    &inp, &net, 1,
6925 	    SCTP_DEFAULT_VRFID);
6926 	if ((stcb != NULL) &&
6927 	    (net != NULL) &&
6928 	    (inp != NULL)) {
6929 		/* Check the UDP port numbers */
6930 		if ((udp->uh_dport != net->port) ||
6931 		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
6932 			SCTP_TCB_UNLOCK(stcb);
6933 			return;
6934 		}
6935 		/* Check the verification tag */
6936 		if (ntohl(sh->v_tag) != 0) {
6937 			/*
6938 			 * This must be the verification tag used for
6939 			 * sending out packets. We don't consider packets
6940 			 * reflecting the verification tag.
6941 			 */
6942 			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
6943 				SCTP_TCB_UNLOCK(stcb);
6944 				return;
6945 			}
6946 		} else {
6947 			if (ntohs(outer_ip->ip_len) >=
6948 			    sizeof(struct ip) +
6949 			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
6950 				/*
6951 				 * In this case we can check if we got an
6952 				 * INIT chunk and if the initiate tag
6953 				 * matches.
6954 				 */
6955 				ch = (struct sctp_init_chunk *)(sh + 1);
6956 				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
6957 				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
6958 					SCTP_TCB_UNLOCK(stcb);
6959 					return;
6960 				}
6961 			} else {
6962 				SCTP_TCB_UNLOCK(stcb);
6963 				return;
6964 			}
6965 		}
6966 		type = icmp->icmp_type;
6967 		code = icmp->icmp_code;
6968 		if ((type == ICMP_UNREACH) &&
6969 		    (code == ICMP_UNREACH_PORT)) {
6970 			code = ICMP_UNREACH_PROTOCOL;
6971 		}
6972 		sctp_notify(inp, stcb, net, type, code,
6973 		    ntohs(inner_ip->ip_len),
6974 		    ntohs(icmp->icmp_nextmtu));
6975 	} else {
6976 		if ((stcb == NULL) && (inp != NULL)) {
6977 			/* reduce ref-count */
6978 			SCTP_INP_WLOCK(inp);
6979 			SCTP_INP_DECR_REF(inp);
6980 			SCTP_INP_WUNLOCK(inp);
6981 		}
6982 		if (stcb) {
6983 			SCTP_TCB_UNLOCK(stcb);
6984 		}
6985 	}
6986 	return;
6987 }
6988 #endif
6989 
6990 #ifdef INET6
6991 static void
6992 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
6993 {
6994 	struct ip6ctlparam *ip6cp;
6995 	struct sctp_inpcb *inp;
6996 	struct sctp_tcb *stcb;
6997 	struct sctp_nets *net;
6998 	struct sctphdr sh;
6999 	struct udphdr udp;
7000 	struct sockaddr_in6 src, dst;
7001 	uint8_t type, code;
7002 
7003 	ip6cp = (struct ip6ctlparam *)d;
7004 	/*
7005 	 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
7006 	 */
7007 	if (ip6cp->ip6c_m == NULL) {
7008 		return;
7009 	}
7010 	/*
7011 	 * Check if we can safely examine the ports and the verification tag
7012 	 * of the SCTP common header.
7013 	 */
7014 	if (ip6cp->ip6c_m->m_pkthdr.len <
7015 	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
7016 		return;
7017 	}
7018 	/* Copy out the UDP header. */
7019 	memset(&udp, 0, sizeof(struct udphdr));
7020 	m_copydata(ip6cp->ip6c_m,
7021 	    ip6cp->ip6c_off,
7022 	    sizeof(struct udphdr),
7023 	    (caddr_t)&udp);
7024 	/* Copy out the port numbers and the verification tag. */
7025 	memset(&sh, 0, sizeof(struct sctphdr));
7026 	m_copydata(ip6cp->ip6c_m,
7027 	    ip6cp->ip6c_off + sizeof(struct udphdr),
7028 	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
7029 	    (caddr_t)&sh);
7030 	memset(&src, 0, sizeof(struct sockaddr_in6));
7031 	src.sin6_family = AF_INET6;
7032 	src.sin6_len = sizeof(struct sockaddr_in6);
7033 	src.sin6_port = sh.src_port;
7034 	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
7035 	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7036 		return;
7037 	}
7038 	memset(&dst, 0, sizeof(struct sockaddr_in6));
7039 	dst.sin6_family = AF_INET6;
7040 	dst.sin6_len = sizeof(struct sockaddr_in6);
7041 	dst.sin6_port = sh.dest_port;
7042 	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7043 	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7044 		return;
7045 	}
7046 	inp = NULL;
7047 	net = NULL;
7048 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7049 	    (struct sockaddr *)&src,
7050 	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7051 	if ((stcb != NULL) &&
7052 	    (net != NULL) &&
7053 	    (inp != NULL)) {
7054 		/* Check the UDP port numbers */
7055 		if ((udp.uh_dport != net->port) ||
7056 		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7057 			SCTP_TCB_UNLOCK(stcb);
7058 			return;
7059 		}
7060 		/* Check the verification tag */
7061 		if (ntohl(sh.v_tag) != 0) {
7062 			/*
7063 			 * This must be the verification tag used for
7064 			 * sending out packets. We don't consider packets
7065 			 * reflecting the verification tag.
7066 			 */
7067 			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7068 				SCTP_TCB_UNLOCK(stcb);
7069 				return;
7070 			}
7071 		} else {
7072 			if (ip6cp->ip6c_m->m_pkthdr.len >=
7073 			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7074 			    sizeof(struct sctphdr) +
7075 			    sizeof(struct sctp_chunkhdr) +
7076 			    offsetof(struct sctp_init, a_rwnd)) {
7077 				/*
7078 				 * In this case we can check if we got an
7079 				 * INIT chunk and if the initiate tag
7080 				 * matches.
7081 				 */
7082 				uint32_t initiate_tag;
7083 				uint8_t chunk_type;
7084 
7085 				m_copydata(ip6cp->ip6c_m,
7086 				    ip6cp->ip6c_off +
7087 				    sizeof(struct udphdr) +
7088 				    sizeof(struct sctphdr),
7089 				    sizeof(uint8_t),
7090 				    (caddr_t)&chunk_type);
7091 				m_copydata(ip6cp->ip6c_m,
7092 				    ip6cp->ip6c_off +
7093 				    sizeof(struct udphdr) +
7094 				    sizeof(struct sctphdr) +
7095 				    sizeof(struct sctp_chunkhdr),
7096 				    sizeof(uint32_t),
7097 				    (caddr_t)&initiate_tag);
7098 				if ((chunk_type != SCTP_INITIATION) ||
7099 				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7100 					SCTP_TCB_UNLOCK(stcb);
7101 					return;
7102 				}
7103 			} else {
7104 				SCTP_TCB_UNLOCK(stcb);
7105 				return;
7106 			}
7107 		}
7108 		type = ip6cp->ip6c_icmp6->icmp6_type;
7109 		code = ip6cp->ip6c_icmp6->icmp6_code;
7110 		if ((type == ICMP6_DST_UNREACH) &&
7111 		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7112 			type = ICMP6_PARAM_PROB;
7113 			code = ICMP6_PARAMPROB_NEXTHEADER;
7114 		}
7115 		sctp6_notify(inp, stcb, net, type, code,
7116 		    (uint16_t)ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7117 	} else {
7118 		if ((stcb == NULL) && (inp != NULL)) {
7119 			/* reduce inp's ref-count */
7120 			SCTP_INP_WLOCK(inp);
7121 			SCTP_INP_DECR_REF(inp);
7122 			SCTP_INP_WUNLOCK(inp);
7123 		}
7124 		if (stcb) {
7125 			SCTP_TCB_UNLOCK(stcb);
7126 		}
7127 	}
7128 }
7129 #endif
7130 
7131 void
7132 sctp_over_udp_stop(void)
7133 {
7134 	/*
7135 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7136 	 * for writting!
7137 	 */
7138 #ifdef INET
7139 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7140 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7141 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7142 	}
7143 #endif
7144 #ifdef INET6
7145 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7146 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7147 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7148 	}
7149 #endif
7150 }
7151 
7152 int
7153 sctp_over_udp_start(void)
7154 {
7155 	uint16_t port;
7156 	int ret;
7157 #ifdef INET
7158 	struct sockaddr_in sin;
7159 #endif
7160 #ifdef INET6
7161 	struct sockaddr_in6 sin6;
7162 #endif
7163 	/*
7164 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7165 	 * for writting!
7166 	 */
7167 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7168 	if (ntohs(port) == 0) {
7169 		/* Must have a port set */
7170 		return (EINVAL);
7171 	}
7172 #ifdef INET
7173 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7174 		/* Already running -- must stop first */
7175 		return (EALREADY);
7176 	}
7177 #endif
7178 #ifdef INET6
7179 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7180 		/* Already running -- must stop first */
7181 		return (EALREADY);
7182 	}
7183 #endif
7184 #ifdef INET
7185 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7186 	    SOCK_DGRAM, IPPROTO_UDP,
7187 	    curthread->td_ucred, curthread))) {
7188 		sctp_over_udp_stop();
7189 		return (ret);
7190 	}
7191 	/* Call the special UDP hook. */
7192 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7193 	    sctp_recv_udp_tunneled_packet,
7194 	    sctp_recv_icmp_tunneled_packet,
7195 	    NULL))) {
7196 		sctp_over_udp_stop();
7197 		return (ret);
7198 	}
7199 	/* Ok, we have a socket, bind it to the port. */
7200 	memset(&sin, 0, sizeof(struct sockaddr_in));
7201 	sin.sin_len = sizeof(struct sockaddr_in);
7202 	sin.sin_family = AF_INET;
7203 	sin.sin_port = htons(port);
7204 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7205 	    (struct sockaddr *)&sin, curthread))) {
7206 		sctp_over_udp_stop();
7207 		return (ret);
7208 	}
7209 #endif
7210 #ifdef INET6
7211 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7212 	    SOCK_DGRAM, IPPROTO_UDP,
7213 	    curthread->td_ucred, curthread))) {
7214 		sctp_over_udp_stop();
7215 		return (ret);
7216 	}
7217 	/* Call the special UDP hook. */
7218 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7219 	    sctp_recv_udp_tunneled_packet,
7220 	    sctp_recv_icmp6_tunneled_packet,
7221 	    NULL))) {
7222 		sctp_over_udp_stop();
7223 		return (ret);
7224 	}
7225 	/* Ok, we have a socket, bind it to the port. */
7226 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7227 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7228 	sin6.sin6_family = AF_INET6;
7229 	sin6.sin6_port = htons(port);
7230 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7231 	    (struct sockaddr *)&sin6, curthread))) {
7232 		sctp_over_udp_stop();
7233 		return (ret);
7234 	}
7235 #endif
7236 	return (0);
7237 }
7238