xref: /freebsd/sys/netinet/sctputil.c (revision 3f68b24e10aeb1a1cd85f2d349da44138d52c501)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55 #ifdef INET6
56 #include <netinet/icmp6.h>
57 #endif
58 
59 
60 #ifndef KTR_SCTP
61 #define KTR_SCTP KTR_SUBSYS
62 #endif
63 
64 extern const struct sctp_cc_functions sctp_cc_functions[];
65 extern const struct sctp_ss_functions sctp_ss_functions[];
66 
67 void
68 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
69 {
70 	struct sctp_cwnd_log sctp_clog;
71 
72 	sctp_clog.x.sb.stcb = stcb;
73 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
74 	if (stcb)
75 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
76 	else
77 		sctp_clog.x.sb.stcb_sbcc = 0;
78 	sctp_clog.x.sb.incr = incr;
79 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
80 	    SCTP_LOG_EVENT_SB,
81 	    from,
82 	    sctp_clog.x.misc.log1,
83 	    sctp_clog.x.misc.log2,
84 	    sctp_clog.x.misc.log3,
85 	    sctp_clog.x.misc.log4);
86 }
87 
88 void
89 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
90 {
91 	struct sctp_cwnd_log sctp_clog;
92 
93 	sctp_clog.x.close.inp = (void *)inp;
94 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
95 	if (stcb) {
96 		sctp_clog.x.close.stcb = (void *)stcb;
97 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
98 	} else {
99 		sctp_clog.x.close.stcb = 0;
100 		sctp_clog.x.close.state = 0;
101 	}
102 	sctp_clog.x.close.loc = loc;
103 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
104 	    SCTP_LOG_EVENT_CLOSE,
105 	    0,
106 	    sctp_clog.x.misc.log1,
107 	    sctp_clog.x.misc.log2,
108 	    sctp_clog.x.misc.log3,
109 	    sctp_clog.x.misc.log4);
110 }
111 
112 void
113 rto_logging(struct sctp_nets *net, int from)
114 {
115 	struct sctp_cwnd_log sctp_clog;
116 
117 	memset(&sctp_clog, 0, sizeof(sctp_clog));
118 	sctp_clog.x.rto.net = (void *)net;
119 	sctp_clog.x.rto.rtt = net->rtt / 1000;
120 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
121 	    SCTP_LOG_EVENT_RTT,
122 	    from,
123 	    sctp_clog.x.misc.log1,
124 	    sctp_clog.x.misc.log2,
125 	    sctp_clog.x.misc.log3,
126 	    sctp_clog.x.misc.log4);
127 }
128 
129 void
130 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
131 {
132 	struct sctp_cwnd_log sctp_clog;
133 
134 	sctp_clog.x.strlog.stcb = stcb;
135 	sctp_clog.x.strlog.n_tsn = tsn;
136 	sctp_clog.x.strlog.n_sseq = sseq;
137 	sctp_clog.x.strlog.e_tsn = 0;
138 	sctp_clog.x.strlog.e_sseq = 0;
139 	sctp_clog.x.strlog.strm = stream;
140 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
141 	    SCTP_LOG_EVENT_STRM,
142 	    from,
143 	    sctp_clog.x.misc.log1,
144 	    sctp_clog.x.misc.log2,
145 	    sctp_clog.x.misc.log3,
146 	    sctp_clog.x.misc.log4);
147 }
148 
149 void
150 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
151 {
152 	struct sctp_cwnd_log sctp_clog;
153 
154 	sctp_clog.x.nagle.stcb = (void *)stcb;
155 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
156 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
157 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
158 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
159 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
160 	    SCTP_LOG_EVENT_NAGLE,
161 	    action,
162 	    sctp_clog.x.misc.log1,
163 	    sctp_clog.x.misc.log2,
164 	    sctp_clog.x.misc.log3,
165 	    sctp_clog.x.misc.log4);
166 }
167 
168 void
169 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
170 {
171 	struct sctp_cwnd_log sctp_clog;
172 
173 	sctp_clog.x.sack.cumack = cumack;
174 	sctp_clog.x.sack.oldcumack = old_cumack;
175 	sctp_clog.x.sack.tsn = tsn;
176 	sctp_clog.x.sack.numGaps = gaps;
177 	sctp_clog.x.sack.numDups = dups;
178 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
179 	    SCTP_LOG_EVENT_SACK,
180 	    from,
181 	    sctp_clog.x.misc.log1,
182 	    sctp_clog.x.misc.log2,
183 	    sctp_clog.x.misc.log3,
184 	    sctp_clog.x.misc.log4);
185 }
186 
187 void
188 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
189 {
190 	struct sctp_cwnd_log sctp_clog;
191 
192 	memset(&sctp_clog, 0, sizeof(sctp_clog));
193 	sctp_clog.x.map.base = map;
194 	sctp_clog.x.map.cum = cum;
195 	sctp_clog.x.map.high = high;
196 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
197 	    SCTP_LOG_EVENT_MAP,
198 	    from,
199 	    sctp_clog.x.misc.log1,
200 	    sctp_clog.x.misc.log2,
201 	    sctp_clog.x.misc.log3,
202 	    sctp_clog.x.misc.log4);
203 }
204 
205 void
206 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
207 {
208 	struct sctp_cwnd_log sctp_clog;
209 
210 	memset(&sctp_clog, 0, sizeof(sctp_clog));
211 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
212 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
213 	sctp_clog.x.fr.tsn = tsn;
214 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
215 	    SCTP_LOG_EVENT_FR,
216 	    from,
217 	    sctp_clog.x.misc.log1,
218 	    sctp_clog.x.misc.log2,
219 	    sctp_clog.x.misc.log3,
220 	    sctp_clog.x.misc.log4);
221 }
222 
223 #ifdef SCTP_MBUF_LOGGING
224 void
225 sctp_log_mb(struct mbuf *m, int from)
226 {
227 	struct sctp_cwnd_log sctp_clog;
228 
229 	sctp_clog.x.mb.mp = m;
230 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
231 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
232 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
233 	if (SCTP_BUF_IS_EXTENDED(m)) {
234 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
235 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
236 	} else {
237 		sctp_clog.x.mb.ext = 0;
238 		sctp_clog.x.mb.refcnt = 0;
239 	}
240 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
241 	    SCTP_LOG_EVENT_MBUF,
242 	    from,
243 	    sctp_clog.x.misc.log1,
244 	    sctp_clog.x.misc.log2,
245 	    sctp_clog.x.misc.log3,
246 	    sctp_clog.x.misc.log4);
247 }
248 
249 void
250 sctp_log_mbc(struct mbuf *m, int from)
251 {
252 	struct mbuf *mat;
253 
254 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
255 		sctp_log_mb(mat, from);
256 	}
257 }
258 
259 #endif
260 
261 void
262 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
263 {
264 	struct sctp_cwnd_log sctp_clog;
265 
266 	if (control == NULL) {
267 		SCTP_PRINTF("Gak log of NULL?\n");
268 		return;
269 	}
270 	sctp_clog.x.strlog.stcb = control->stcb;
271 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
272 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
273 	sctp_clog.x.strlog.strm = control->sinfo_stream;
274 	if (poschk != NULL) {
275 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
276 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
277 	} else {
278 		sctp_clog.x.strlog.e_tsn = 0;
279 		sctp_clog.x.strlog.e_sseq = 0;
280 	}
281 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
282 	    SCTP_LOG_EVENT_STRM,
283 	    from,
284 	    sctp_clog.x.misc.log1,
285 	    sctp_clog.x.misc.log2,
286 	    sctp_clog.x.misc.log3,
287 	    sctp_clog.x.misc.log4);
288 }
289 
290 void
291 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
292 {
293 	struct sctp_cwnd_log sctp_clog;
294 
295 	sctp_clog.x.cwnd.net = net;
296 	if (stcb->asoc.send_queue_cnt > 255)
297 		sctp_clog.x.cwnd.cnt_in_send = 255;
298 	else
299 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
300 	if (stcb->asoc.stream_queue_cnt > 255)
301 		sctp_clog.x.cwnd.cnt_in_str = 255;
302 	else
303 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
304 
305 	if (net) {
306 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
307 		sctp_clog.x.cwnd.inflight = net->flight_size;
308 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
309 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
310 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
311 	}
312 	if (SCTP_CWNDLOG_PRESEND == from) {
313 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
314 	}
315 	sctp_clog.x.cwnd.cwnd_augment = augment;
316 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
317 	    SCTP_LOG_EVENT_CWND,
318 	    from,
319 	    sctp_clog.x.misc.log1,
320 	    sctp_clog.x.misc.log2,
321 	    sctp_clog.x.misc.log3,
322 	    sctp_clog.x.misc.log4);
323 }
324 
325 void
326 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
327 {
328 	struct sctp_cwnd_log sctp_clog;
329 
330 	memset(&sctp_clog, 0, sizeof(sctp_clog));
331 	if (inp) {
332 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
333 
334 	} else {
335 		sctp_clog.x.lock.sock = (void *)NULL;
336 	}
337 	sctp_clog.x.lock.inp = (void *)inp;
338 	if (stcb) {
339 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
340 	} else {
341 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
342 	}
343 	if (inp) {
344 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
345 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
346 	} else {
347 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
348 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
349 	}
350 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
351 	if (inp && (inp->sctp_socket)) {
352 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
353 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
354 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
355 	} else {
356 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
357 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
358 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
359 	}
360 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
361 	    SCTP_LOG_LOCK_EVENT,
362 	    from,
363 	    sctp_clog.x.misc.log1,
364 	    sctp_clog.x.misc.log2,
365 	    sctp_clog.x.misc.log3,
366 	    sctp_clog.x.misc.log4);
367 }
368 
369 void
370 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
371 {
372 	struct sctp_cwnd_log sctp_clog;
373 
374 	memset(&sctp_clog, 0, sizeof(sctp_clog));
375 	sctp_clog.x.cwnd.net = net;
376 	sctp_clog.x.cwnd.cwnd_new_value = error;
377 	sctp_clog.x.cwnd.inflight = net->flight_size;
378 	sctp_clog.x.cwnd.cwnd_augment = burst;
379 	if (stcb->asoc.send_queue_cnt > 255)
380 		sctp_clog.x.cwnd.cnt_in_send = 255;
381 	else
382 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
383 	if (stcb->asoc.stream_queue_cnt > 255)
384 		sctp_clog.x.cwnd.cnt_in_str = 255;
385 	else
386 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
387 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
388 	    SCTP_LOG_EVENT_MAXBURST,
389 	    from,
390 	    sctp_clog.x.misc.log1,
391 	    sctp_clog.x.misc.log2,
392 	    sctp_clog.x.misc.log3,
393 	    sctp_clog.x.misc.log4);
394 }
395 
396 void
397 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
398 {
399 	struct sctp_cwnd_log sctp_clog;
400 
401 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
402 	sctp_clog.x.rwnd.send_size = snd_size;
403 	sctp_clog.x.rwnd.overhead = overhead;
404 	sctp_clog.x.rwnd.new_rwnd = 0;
405 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
406 	    SCTP_LOG_EVENT_RWND,
407 	    from,
408 	    sctp_clog.x.misc.log1,
409 	    sctp_clog.x.misc.log2,
410 	    sctp_clog.x.misc.log3,
411 	    sctp_clog.x.misc.log4);
412 }
413 
414 void
415 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
416 {
417 	struct sctp_cwnd_log sctp_clog;
418 
419 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
420 	sctp_clog.x.rwnd.send_size = flight_size;
421 	sctp_clog.x.rwnd.overhead = overhead;
422 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
423 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
424 	    SCTP_LOG_EVENT_RWND,
425 	    from,
426 	    sctp_clog.x.misc.log1,
427 	    sctp_clog.x.misc.log2,
428 	    sctp_clog.x.misc.log3,
429 	    sctp_clog.x.misc.log4);
430 }
431 
432 #ifdef SCTP_MBCNT_LOGGING
433 static void
434 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
435 {
436 	struct sctp_cwnd_log sctp_clog;
437 
438 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
439 	sctp_clog.x.mbcnt.size_change = book;
440 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
441 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
442 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
443 	    SCTP_LOG_EVENT_MBCNT,
444 	    from,
445 	    sctp_clog.x.misc.log1,
446 	    sctp_clog.x.misc.log2,
447 	    sctp_clog.x.misc.log3,
448 	    sctp_clog.x.misc.log4);
449 }
450 
451 #endif
452 
453 void
454 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
455 {
456 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
457 	    SCTP_LOG_MISC_EVENT,
458 	    from,
459 	    a, b, c, d);
460 }
461 
462 void
463 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
464 {
465 	struct sctp_cwnd_log sctp_clog;
466 
467 	sctp_clog.x.wake.stcb = (void *)stcb;
468 	sctp_clog.x.wake.wake_cnt = wake_cnt;
469 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
470 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
471 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
472 
473 	if (stcb->asoc.stream_queue_cnt < 0xff)
474 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
475 	else
476 		sctp_clog.x.wake.stream_qcnt = 0xff;
477 
478 	if (stcb->asoc.chunks_on_out_queue < 0xff)
479 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
480 	else
481 		sctp_clog.x.wake.chunks_on_oque = 0xff;
482 
483 	sctp_clog.x.wake.sctpflags = 0;
484 	/* set in the defered mode stuff */
485 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
486 		sctp_clog.x.wake.sctpflags |= 1;
487 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
488 		sctp_clog.x.wake.sctpflags |= 2;
489 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
490 		sctp_clog.x.wake.sctpflags |= 4;
491 	/* what about the sb */
492 	if (stcb->sctp_socket) {
493 		struct socket *so = stcb->sctp_socket;
494 
495 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
496 	} else {
497 		sctp_clog.x.wake.sbflags = 0xff;
498 	}
499 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
500 	    SCTP_LOG_EVENT_WAKE,
501 	    from,
502 	    sctp_clog.x.misc.log1,
503 	    sctp_clog.x.misc.log2,
504 	    sctp_clog.x.misc.log3,
505 	    sctp_clog.x.misc.log4);
506 }
507 
508 void
509 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
510 {
511 	struct sctp_cwnd_log sctp_clog;
512 
513 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
514 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
515 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
516 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
517 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
518 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
519 	sctp_clog.x.blk.sndlen = (uint32_t) sendlen;
520 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
521 	    SCTP_LOG_EVENT_BLOCK,
522 	    from,
523 	    sctp_clog.x.misc.log1,
524 	    sctp_clog.x.misc.log2,
525 	    sctp_clog.x.misc.log3,
526 	    sctp_clog.x.misc.log4);
527 }
528 
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/*
	 * Intentional stub: nothing is copied out here; the cwnd log is
	 * apparently retrieved through the KTR facility instead.
	 */
	/* May need to fix this if ktrdump does not work */
	return (0);
}
535 
536 #ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];	/* ring buffer of two-byte audit records */
static int sctp_audit_indx = 0;	/* next slot to write; wraps to 0 at SCTP_AUDIT_SIZE */
539 
540 static
541 void
542 sctp_print_audit_report(void)
543 {
544 	int i;
545 	int cnt;
546 
547 	cnt = 0;
548 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
549 		if ((sctp_audit_data[i][0] == 0xe0) &&
550 		    (sctp_audit_data[i][1] == 0x01)) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if (sctp_audit_data[i][0] == 0xf0) {
554 			cnt = 0;
555 			SCTP_PRINTF("\n");
556 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
557 		    (sctp_audit_data[i][1] == 0x01)) {
558 			SCTP_PRINTF("\n");
559 			cnt = 0;
560 		}
561 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
562 		    (uint32_t) sctp_audit_data[i][1]);
563 		cnt++;
564 		if ((cnt % 14) == 0)
565 			SCTP_PRINTF("\n");
566 	}
567 	for (i = 0; i < sctp_audit_indx; i++) {
568 		if ((sctp_audit_data[i][0] == 0xe0) &&
569 		    (sctp_audit_data[i][1] == 0x01)) {
570 			cnt = 0;
571 			SCTP_PRINTF("\n");
572 		} else if (sctp_audit_data[i][0] == 0xf0) {
573 			cnt = 0;
574 			SCTP_PRINTF("\n");
575 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
576 		    (sctp_audit_data[i][1] == 0x01)) {
577 			SCTP_PRINTF("\n");
578 			cnt = 0;
579 		}
580 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
581 		    (uint32_t) sctp_audit_data[i][1]);
582 		cnt++;
583 		if ((cnt % 14) == 0)
584 			SCTP_PRINTF("\n");
585 	}
586 	SCTP_PRINTF("\n");
587 }
588 
/*
 * Audit the association's retransmission and flight-size bookkeeping
 * against the actual sent queue, logging records into the audit ring.
 * Discrepancies are printed, corrected in place, and trigger a full
 * audit-report dump at the end.  Records starting with 0xAF mark errors.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* Entry marker: 0xAA plus the caller-supplied location code. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: called without an endpoint; nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: called without an association; nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the association's current retransmit count (low byte). */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Recount from first principles: chunks marked for resend, and
	 * booked bytes/chunks still counted as in flight (sent-state
	 * values below SCTP_DATAGRAM_RESEND).
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retransmit-count mismatch; report and correct. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		/* 0xA2: record the corrected retransmit count. */
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total-flight mismatch; report and correct. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: flight chunk-count mismatch; report and correct. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: per-destination flight sizes must sum to the total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net flight sums disagree with the total. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			/* Recompute this destination's flight from the sent queue. */
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
718 
719 void
720 sctp_audit_log(uint8_t ev, uint8_t fd)
721 {
722 
723 	sctp_audit_data[sctp_audit_indx][0] = ev;
724 	sctp_audit_data[sctp_audit_indx][1] = fd;
725 	sctp_audit_indx++;
726 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
727 		sctp_audit_indx = 0;
728 	}
729 }
730 
731 #endif
732 
733 /*
734  * sctp_stop_timers_for_shutdown() should be called
735  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
736  * state to make sure that all timers are stopped.
737  */
738 void
739 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
740 {
741 	struct sctp_association *asoc;
742 	struct sctp_nets *net;
743 
744 	asoc = &stcb->asoc;
745 
746 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
747 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
748 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
749 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
750 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
751 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
752 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
753 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
754 	}
755 }
756 
757 /*
758  * a list of sizes based on typical mtu's, used only if next hop size not
759  * returned.
760  */
/*
 * A table of common link MTUs in increasing order, consulted only when
 * the next hop does not report a usable size.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Return the largest table entry strictly smaller than val.  Values at
 * or below the smallest entry are returned unchanged; values above the
 * largest entry yield the largest entry.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t idx;

	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	/* Scan downward for the first entry below val. */
	for (idx = sizeof(sctp_mtu_sizes) / sizeof(uint32_t); idx > 0; idx--) {
		if (sctp_mtu_sizes[idx - 1] < val) {
			return (sctp_mtu_sizes[idx - 1]);
		}
	}
	return (val);
}

/*
 * Return the smallest table entry strictly larger than val, or val
 * itself when no entry is larger.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	uint32_t idx, count;

	count = sizeof(sctp_mtu_sizes) / sizeof(uint32_t);
	for (idx = 0; idx < count; idx++) {
		if (sctp_mtu_sizes[idx] > val) {
			return (sctp_mtu_sizes[idx]);
		}
	}
	return (val);
}
819 
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;	/* restart handing out bytes from the front */
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;	/* ensure the next refill hashes different input */
}
838 
/*
 * Hand out the next 4 random bytes from the endpoint's random store as
 * an initial TSN, refilling the store when it is exhausted.  A non-zero
 * initial_sequence_debug turns this into a simple incrementing counter
 * for debugging.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	/* Reserve 4 bytes of the store via a lock-free CAS on store_at. */
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		/* Another thread raced us; try to reserve again. */
		goto retry;
	}
	if (new_store == 0) {
		/* We consumed the last slot; regenerate the random bytes. */
		sctp_fill_random_store(inp);
	}
	/*
	 * NOTE(review): reads the reserved bytes through a uint32_t
	 * pointer; this relies on the platform/compiler tolerating the
	 * cast-based access (alignment and aliasing) -- confirm before
	 * reusing this pattern elsewhere.
	 */
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
876 
877 uint32_t
878 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
879 {
880 	uint32_t x;
881 	struct timeval now;
882 
883 	if (check) {
884 		(void)SCTP_GETTIME_TIMEVAL(&now);
885 	}
886 	for (;;) {
887 		x = sctp_select_initial_TSN(&inp->sctp_ep);
888 		if (x == 0) {
889 			/* we never use 0 */
890 			continue;
891 		}
892 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
893 			break;
894 		}
895 	}
896 	return (x);
897 }
898 
899 int32_t
900 sctp_map_assoc_state(int kernel_state)
901 {
902 	int32_t user_state;
903 
904 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
905 		user_state = SCTP_CLOSED;
906 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
907 		user_state = SCTP_SHUTDOWN_PENDING;
908 	} else {
909 		switch (kernel_state & SCTP_STATE_MASK) {
910 		case SCTP_STATE_EMPTY:
911 			user_state = SCTP_CLOSED;
912 			break;
913 		case SCTP_STATE_INUSE:
914 			user_state = SCTP_CLOSED;
915 			break;
916 		case SCTP_STATE_COOKIE_WAIT:
917 			user_state = SCTP_COOKIE_WAIT;
918 			break;
919 		case SCTP_STATE_COOKIE_ECHOED:
920 			user_state = SCTP_COOKIE_ECHOED;
921 			break;
922 		case SCTP_STATE_OPEN:
923 			user_state = SCTP_ESTABLISHED;
924 			break;
925 		case SCTP_STATE_SHUTDOWN_SENT:
926 			user_state = SCTP_SHUTDOWN_SENT;
927 			break;
928 		case SCTP_STATE_SHUTDOWN_RECEIVED:
929 			user_state = SCTP_SHUTDOWN_RECEIVED;
930 			break;
931 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
932 			user_state = SCTP_SHUTDOWN_ACK_SENT;
933 			break;
934 		default:
935 			user_state = SCTP_CLOSED;
936 			break;
937 		}
938 	}
939 	return (user_state);
940 }
941 
942 int
943 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
944     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
945 {
946 	struct sctp_association *asoc;
947 
948 	/*
949 	 * Anything set to zero is taken care of by the allocation routine's
950 	 * bzero
951 	 */
952 
953 	/*
954 	 * Up front select what scoping to apply on addresses I tell my peer
955 	 * Not sure what to do with these right now, we will need to come up
956 	 * with a way to set them. We may need to pass them through from the
957 	 * caller in the sctp_aloc_assoc() function.
958 	 */
959 	int i;
960 
961 #if defined(SCTP_DETAILED_STR_STATS)
962 	int j;
963 
964 #endif
965 
966 	asoc = &stcb->asoc;
967 	/* init all variables to a known value. */
968 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
969 	asoc->max_burst = inp->sctp_ep.max_burst;
970 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
971 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
972 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
973 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
974 	asoc->ecn_supported = inp->ecn_supported;
975 	asoc->prsctp_supported = inp->prsctp_supported;
976 	asoc->idata_supported = inp->idata_supported;
977 	asoc->auth_supported = inp->auth_supported;
978 	asoc->asconf_supported = inp->asconf_supported;
979 	asoc->reconfig_supported = inp->reconfig_supported;
980 	asoc->nrsack_supported = inp->nrsack_supported;
981 	asoc->pktdrop_supported = inp->pktdrop_supported;
982 	asoc->idata_supported = inp->idata_supported;
983 	asoc->sctp_cmt_pf = (uint8_t) 0;
984 	asoc->sctp_frag_point = inp->sctp_frag_point;
985 	asoc->sctp_features = inp->sctp_features;
986 	asoc->default_dscp = inp->sctp_ep.default_dscp;
987 	asoc->max_cwnd = inp->max_cwnd;
988 #ifdef INET6
989 	if (inp->sctp_ep.default_flowlabel) {
990 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
991 	} else {
992 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
993 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
994 			asoc->default_flowlabel &= 0x000fffff;
995 			asoc->default_flowlabel |= 0x80000000;
996 		} else {
997 			asoc->default_flowlabel = 0;
998 		}
999 	}
1000 #endif
1001 	asoc->sb_send_resv = 0;
1002 	if (override_tag) {
1003 		asoc->my_vtag = override_tag;
1004 	} else {
1005 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1006 	}
1007 	/* Get the nonce tags */
1008 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1009 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1010 	asoc->vrf_id = vrf_id;
1011 
1012 #ifdef SCTP_ASOCLOG_OF_TSNS
1013 	asoc->tsn_in_at = 0;
1014 	asoc->tsn_out_at = 0;
1015 	asoc->tsn_in_wrapped = 0;
1016 	asoc->tsn_out_wrapped = 0;
1017 	asoc->cumack_log_at = 0;
1018 	asoc->cumack_log_atsnt = 0;
1019 #endif
1020 #ifdef SCTP_FS_SPEC_LOG
1021 	asoc->fs_index = 0;
1022 #endif
1023 	asoc->refcnt = 0;
1024 	asoc->assoc_up_sent = 0;
1025 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1026 	    sctp_select_initial_TSN(&inp->sctp_ep);
1027 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1028 	/* we are optimisitic here */
1029 	asoc->peer_supports_nat = 0;
1030 	asoc->sent_queue_retran_cnt = 0;
1031 
1032 	/* for CMT */
1033 	asoc->last_net_cmt_send_started = NULL;
1034 
1035 	/* This will need to be adjusted */
1036 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1037 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1038 	asoc->asconf_seq_in = asoc->last_acked_seq;
1039 
1040 	/* here we are different, we hold the next one we expect */
1041 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1042 
1043 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1044 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1045 
1046 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1047 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1048 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1049 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1050 	asoc->free_chunk_cnt = 0;
1051 
1052 	asoc->iam_blocking = 0;
1053 	asoc->context = inp->sctp_context;
1054 	asoc->local_strreset_support = inp->local_strreset_support;
1055 	asoc->def_send = inp->def_send;
1056 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1057 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1058 	asoc->pr_sctp_cnt = 0;
1059 	asoc->total_output_queue_size = 0;
1060 
1061 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1062 		asoc->scope.ipv6_addr_legal = 1;
1063 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1064 			asoc->scope.ipv4_addr_legal = 1;
1065 		} else {
1066 			asoc->scope.ipv4_addr_legal = 0;
1067 		}
1068 	} else {
1069 		asoc->scope.ipv6_addr_legal = 0;
1070 		asoc->scope.ipv4_addr_legal = 1;
1071 	}
1072 
1073 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1074 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1075 
1076 	asoc->smallest_mtu = inp->sctp_frag_point;
1077 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1078 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1079 
1080 	asoc->locked_on_sending = NULL;
1081 	asoc->stream_locked_on = 0;
1082 	asoc->ecn_echo_cnt_onq = 0;
1083 	asoc->stream_locked = 0;
1084 
1085 	asoc->send_sack = 1;
1086 
1087 	LIST_INIT(&asoc->sctp_restricted_addrs);
1088 
1089 	TAILQ_INIT(&asoc->nets);
1090 	TAILQ_INIT(&asoc->pending_reply_queue);
1091 	TAILQ_INIT(&asoc->asconf_ack_sent);
1092 	/* Setup to fill the hb random cache at first HB */
1093 	asoc->hb_random_idx = 4;
1094 
1095 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1096 
1097 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1098 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1099 
1100 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1101 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1102 
1103 	/*
1104 	 * Now the stream parameters, here we allocate space for all streams
1105 	 * that we request by default.
1106 	 */
1107 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1108 	    o_strms;
1109 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1110 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1111 	    SCTP_M_STRMO);
1112 	if (asoc->strmout == NULL) {
1113 		/* big trouble no memory */
1114 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1115 		return (ENOMEM);
1116 	}
1117 	for (i = 0; i < asoc->streamoutcnt; i++) {
1118 		/*
1119 		 * inbound side must be set to 0xffff, also NOTE when we get
1120 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1121 		 * count (streamoutcnt) but first check if we sent to any of
1122 		 * the upper streams that were dropped (if some were). Those
1123 		 * that were dropped must be notified to the upper layer as
1124 		 * failed to send.
1125 		 */
1126 		asoc->strmout[i].next_mid_ordered = 0;
1127 		asoc->strmout[i].next_mid_unordered = 0;
1128 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1129 		asoc->strmout[i].chunks_on_queues = 0;
1130 #if defined(SCTP_DETAILED_STR_STATS)
1131 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1132 			asoc->strmout[i].abandoned_sent[j] = 0;
1133 			asoc->strmout[i].abandoned_unsent[j] = 0;
1134 		}
1135 #else
1136 		asoc->strmout[i].abandoned_sent[0] = 0;
1137 		asoc->strmout[i].abandoned_unsent[0] = 0;
1138 #endif
1139 		asoc->strmout[i].stream_no = i;
1140 		asoc->strmout[i].last_msg_incomplete = 0;
1141 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1142 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1143 	}
1144 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1145 
1146 	/* Now the mapping array */
1147 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1148 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1149 	    SCTP_M_MAP);
1150 	if (asoc->mapping_array == NULL) {
1151 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1152 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1153 		return (ENOMEM);
1154 	}
1155 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1156 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1157 	    SCTP_M_MAP);
1158 	if (asoc->nr_mapping_array == NULL) {
1159 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1160 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1161 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1162 		return (ENOMEM);
1163 	}
1164 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1165 
1166 	/* Now the init of the other outqueues */
1167 	TAILQ_INIT(&asoc->free_chunks);
1168 	TAILQ_INIT(&asoc->control_send_queue);
1169 	TAILQ_INIT(&asoc->asconf_send_queue);
1170 	TAILQ_INIT(&asoc->send_queue);
1171 	TAILQ_INIT(&asoc->sent_queue);
1172 	TAILQ_INIT(&asoc->resetHead);
1173 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1174 	TAILQ_INIT(&asoc->asconf_queue);
1175 	/* authentication fields */
1176 	asoc->authinfo.random = NULL;
1177 	asoc->authinfo.active_keyid = 0;
1178 	asoc->authinfo.assoc_key = NULL;
1179 	asoc->authinfo.assoc_keyid = 0;
1180 	asoc->authinfo.recv_key = NULL;
1181 	asoc->authinfo.recv_keyid = 0;
1182 	LIST_INIT(&asoc->shared_keys);
1183 	asoc->marked_retrans = 0;
1184 	asoc->port = inp->sctp_ep.port;
1185 	asoc->timoinit = 0;
1186 	asoc->timodata = 0;
1187 	asoc->timosack = 0;
1188 	asoc->timoshutdown = 0;
1189 	asoc->timoheartbeat = 0;
1190 	asoc->timocookie = 0;
1191 	asoc->timoshutdownack = 0;
1192 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1193 	asoc->discontinuity_time = asoc->start_time;
1194 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1195 		asoc->abandoned_unsent[i] = 0;
1196 		asoc->abandoned_sent[i] = 0;
1197 	}
1198 	/*
1199 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1200 	 * freed later when the association is freed.
1201 	 */
1202 	return (0);
1203 }
1204 
1205 void
1206 sctp_print_mapping_array(struct sctp_association *asoc)
1207 {
1208 	unsigned int i, limit;
1209 
1210 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1211 	    asoc->mapping_array_size,
1212 	    asoc->mapping_array_base_tsn,
1213 	    asoc->cumulative_tsn,
1214 	    asoc->highest_tsn_inside_map,
1215 	    asoc->highest_tsn_inside_nr_map);
1216 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1217 		if (asoc->mapping_array[limit - 1] != 0) {
1218 			break;
1219 		}
1220 	}
1221 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1222 	for (i = 0; i < limit; i++) {
1223 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1224 	}
1225 	if (limit % 16)
1226 		SCTP_PRINTF("\n");
1227 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1228 		if (asoc->nr_mapping_array[limit - 1]) {
1229 			break;
1230 		}
1231 	}
1232 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1233 	for (i = 0; i < limit; i++) {
1234 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1235 	}
1236 	if (limit % 16)
1237 		SCTP_PRINTF("\n");
1238 }
1239 
1240 int
1241 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1242 {
1243 	/* mapping array needs to grow */
1244 	uint8_t *new_array1, *new_array2;
1245 	uint32_t new_size;
1246 
1247 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1248 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1249 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1250 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1251 		/* can't get more, forget it */
1252 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1253 		if (new_array1) {
1254 			SCTP_FREE(new_array1, SCTP_M_MAP);
1255 		}
1256 		if (new_array2) {
1257 			SCTP_FREE(new_array2, SCTP_M_MAP);
1258 		}
1259 		return (-1);
1260 	}
1261 	memset(new_array1, 0, new_size);
1262 	memset(new_array2, 0, new_size);
1263 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1264 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1265 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1266 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1267 	asoc->mapping_array = new_array1;
1268 	asoc->nr_mapping_array = new_array2;
1269 	asoc->mapping_array_size = new_size;
1270 	return (0);
1271 }
1272 
1273 
/*
 * Core of the PCB/association iterator: walk the endpoint list starting
 * at it->inp and, for each endpoint whose flags/features match the
 * iterator's filters, invoke the per-endpoint callback (function_inp),
 * the per-association callback (function_assoc) for every association
 * in the requested state, and the end-of-endpoint callback
 * (function_inp_end).  When the walk completes, the at-end callback is
 * run and the iterator structure itself is freed.
 *
 * Locking: runs with the INP-INFO read lock and the ITERATOR lock held,
 * taking INP read locks and TCB locks as it descends.  Periodically
 * (every SCTP_ITERATOR_MAX_AT_ONCE associations) it drops and reacquires
 * the global locks to let other threads make progress, re-checking
 * sctp_it_ctl.iterator_flags afterwards in case it was asked to stop.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* drop the reference taken when the iterator was queued */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		/*
		 * Common exit: release global locks, run the at-end
		 * callback and free the iterator.  'it' is invalid after
		 * this point.
		 */
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* it->inp is already read-locked from above */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	/* Skip endpoints that do not match the requested flags/features. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		/* run the per-endpoint callback exactly once per endpoint */
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* endpoint callback asked to skip, or no associations */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Keep the stcb and inp alive via refcounts while
			 * the locks are dropped and reacquired.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/* Someone may have asked us to stop while unlocked. */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1421 
/*
 * Drain the global iterator workqueue (sctp_it_ctl.iteratorhead),
 * running each queued iterator to completion via sctp_iterator_work().
 * Each iterator is removed from the queue before the workqueue lock is
 * dropped for the duration of its run; sctp_iterator_work() frees the
 * iterator itself.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		sctp_it_ctl.cur_it = it;
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		/* drop the WQ lock while the (long) iteration runs */
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		sctp_it_ctl.cur_it = NULL;
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
1445 
1446 
1447 static void
1448 sctp_handle_addr_wq(void)
1449 {
1450 	/* deal with the ADDR wq from the rtsock calls */
1451 	struct sctp_laddr *wi, *nwi;
1452 	struct sctp_asconf_iterator *asc;
1453 
1454 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1455 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1456 	if (asc == NULL) {
1457 		/* Try later, no memory */
1458 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1459 		    (struct sctp_inpcb *)NULL,
1460 		    (struct sctp_tcb *)NULL,
1461 		    (struct sctp_nets *)NULL);
1462 		return;
1463 	}
1464 	LIST_INIT(&asc->list_of_work);
1465 	asc->cnt = 0;
1466 
1467 	SCTP_WQ_ADDR_LOCK();
1468 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1469 		LIST_REMOVE(wi, sctp_nxt_addr);
1470 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1471 		asc->cnt++;
1472 	}
1473 	SCTP_WQ_ADDR_UNLOCK();
1474 
1475 	if (asc->cnt == 0) {
1476 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1477 	} else {
1478 		int ret;
1479 
1480 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1481 		    sctp_asconf_iterator_stcb,
1482 		    NULL,	/* No ep end for boundall */
1483 		    SCTP_PCB_FLAGS_BOUNDALL,
1484 		    SCTP_PCB_ANY_FEATURES,
1485 		    SCTP_ASOC_ANY_STATE,
1486 		    (void *)asc, 0,
1487 		    sctp_asconf_iterator_end, NULL, 0);
1488 		if (ret) {
1489 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1490 			/*
1491 			 * Freeing if we are stopping or put back on the
1492 			 * addr_wq.
1493 			 */
1494 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1495 				sctp_asconf_iterator_end(asc, 0);
1496 			} else {
1497 				SCTP_WQ_ADDR_LOCK();
1498 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1499 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1500 				}
1501 				SCTP_WQ_ADDR_UNLOCK();
1502 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1503 			}
1504 		}
1505 	}
1506 }
1507 
/*
 * Callout handler for every SCTP timer type.  't' is the struct
 * sctp_timer armed by sctp_timer_start(); tmr->ep/tcb/net identify the
 * endpoint, association and destination the timer belongs to (any of
 * which may be NULL depending on the timer type).
 *
 * The function first validates the timer and re-checks that the
 * endpoint/association are still alive (taking an inp reference and a
 * tcb refcount + TCB lock), detects callout races (rescheduled or
 * cancelled while we were waiting), then dispatches on the timer type
 * and finally releases whatever it acquired via the get_out/out_decr/
 * out_no_decr exit labels.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output;
	int type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	/* self-pointer check guards against a stale/freed timer structure */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* every type except ADDR_WQ requires an endpoint */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (inp) {
		/* hold the endpoint; dropped at out_decr */
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			/* socket is gone and this timer type needs one */
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* pin the association while we validate it */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			/* association already torn down */
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	type = tmr->type;
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* callout was stopped while we were getting here */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		/* ASOCKILL must still run on a dying association */
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed SACK timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/* only timer type that also requires a valid net */
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			/* re-arm unless HB is disabled on this destination */
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* periodic rotation of the cookie secret keys */
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* guard expired: abort the association outright */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Shutdown guard timer expired");
		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* tear the association down completely */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		/* tear the endpoint down completely */
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* common exit: release the TCB lock and the inp reference */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
	CURVNET_RESTORE();
}
1953 
1954 void
1955 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1956     struct sctp_nets *net)
1957 {
1958 	uint32_t to_ticks;
1959 	struct sctp_timer *tmr;
1960 
1961 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1962 		return;
1963 
1964 	tmr = NULL;
1965 	if (stcb) {
1966 		SCTP_TCB_LOCK_ASSERT(stcb);
1967 	}
1968 	switch (t_type) {
1969 	case SCTP_TIMER_TYPE_ZERO_COPY:
1970 		tmr = &inp->sctp_ep.zero_copy_timer;
1971 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1972 		break;
1973 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1974 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1975 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1976 		break;
1977 	case SCTP_TIMER_TYPE_ADDR_WQ:
1978 		/* Only 1 tick away :-) */
1979 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1980 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1981 		break;
1982 	case SCTP_TIMER_TYPE_SEND:
1983 		/* Here we use the RTO timer */
1984 		{
1985 			int rto_val;
1986 
1987 			if ((stcb == NULL) || (net == NULL)) {
1988 				return;
1989 			}
1990 			tmr = &net->rxt_timer;
1991 			if (net->RTO == 0) {
1992 				rto_val = stcb->asoc.initial_rto;
1993 			} else {
1994 				rto_val = net->RTO;
1995 			}
1996 			to_ticks = MSEC_TO_TICKS(rto_val);
1997 		}
1998 		break;
1999 	case SCTP_TIMER_TYPE_INIT:
2000 		/*
2001 		 * Here we use the INIT timer default usually about 1
2002 		 * minute.
2003 		 */
2004 		if ((stcb == NULL) || (net == NULL)) {
2005 			return;
2006 		}
2007 		tmr = &net->rxt_timer;
2008 		if (net->RTO == 0) {
2009 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2010 		} else {
2011 			to_ticks = MSEC_TO_TICKS(net->RTO);
2012 		}
2013 		break;
2014 	case SCTP_TIMER_TYPE_RECV:
2015 		/*
2016 		 * Here we use the Delayed-Ack timer value from the inp
2017 		 * ususually about 200ms.
2018 		 */
2019 		if (stcb == NULL) {
2020 			return;
2021 		}
2022 		tmr = &stcb->asoc.dack_timer;
2023 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2024 		break;
2025 	case SCTP_TIMER_TYPE_SHUTDOWN:
2026 		/* Here we use the RTO of the destination. */
2027 		if ((stcb == NULL) || (net == NULL)) {
2028 			return;
2029 		}
2030 		if (net->RTO == 0) {
2031 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2032 		} else {
2033 			to_ticks = MSEC_TO_TICKS(net->RTO);
2034 		}
2035 		tmr = &net->rxt_timer;
2036 		break;
2037 	case SCTP_TIMER_TYPE_HEARTBEAT:
2038 		/*
2039 		 * the net is used here so that we can add in the RTO. Even
2040 		 * though we use a different timer. We also add the HB timer
2041 		 * PLUS a random jitter.
2042 		 */
2043 		if ((stcb == NULL) || (net == NULL)) {
2044 			return;
2045 		} else {
2046 			uint32_t rndval;
2047 			uint32_t jitter;
2048 
2049 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2050 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2051 				return;
2052 			}
2053 			if (net->RTO == 0) {
2054 				to_ticks = stcb->asoc.initial_rto;
2055 			} else {
2056 				to_ticks = net->RTO;
2057 			}
2058 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2059 			jitter = rndval % to_ticks;
2060 			if (jitter >= (to_ticks >> 1)) {
2061 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2062 			} else {
2063 				to_ticks = to_ticks - jitter;
2064 			}
2065 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2066 			    !(net->dest_state & SCTP_ADDR_PF)) {
2067 				to_ticks += net->heart_beat_delay;
2068 			}
2069 			/*
2070 			 * Now we must convert the to_ticks that are now in
2071 			 * ms to ticks.
2072 			 */
2073 			to_ticks = MSEC_TO_TICKS(to_ticks);
2074 			tmr = &net->hb_timer;
2075 		}
2076 		break;
2077 	case SCTP_TIMER_TYPE_COOKIE:
2078 		/*
2079 		 * Here we can use the RTO timer from the network since one
2080 		 * RTT was compelete. If a retran happened then we will be
2081 		 * using the RTO initial value.
2082 		 */
2083 		if ((stcb == NULL) || (net == NULL)) {
2084 			return;
2085 		}
2086 		if (net->RTO == 0) {
2087 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2088 		} else {
2089 			to_ticks = MSEC_TO_TICKS(net->RTO);
2090 		}
2091 		tmr = &net->rxt_timer;
2092 		break;
2093 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2094 		/*
2095 		 * nothing needed but the endpoint here ususually about 60
2096 		 * minutes.
2097 		 */
2098 		tmr = &inp->sctp_ep.signature_change;
2099 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2100 		break;
2101 	case SCTP_TIMER_TYPE_ASOCKILL:
2102 		if (stcb == NULL) {
2103 			return;
2104 		}
2105 		tmr = &stcb->asoc.strreset_timer;
2106 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2107 		break;
2108 	case SCTP_TIMER_TYPE_INPKILL:
2109 		/*
2110 		 * The inp is setup to die. We re-use the signature_chage
2111 		 * timer since that has stopped and we are in the GONE
2112 		 * state.
2113 		 */
2114 		tmr = &inp->sctp_ep.signature_change;
2115 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2116 		break;
2117 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2118 		/*
2119 		 * Here we use the value found in the EP for PMTU ususually
2120 		 * about 10 minutes.
2121 		 */
2122 		if ((stcb == NULL) || (net == NULL)) {
2123 			return;
2124 		}
2125 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2126 			return;
2127 		}
2128 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2129 		tmr = &net->pmtu_timer;
2130 		break;
2131 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2132 		/* Here we use the RTO of the destination */
2133 		if ((stcb == NULL) || (net == NULL)) {
2134 			return;
2135 		}
2136 		if (net->RTO == 0) {
2137 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2138 		} else {
2139 			to_ticks = MSEC_TO_TICKS(net->RTO);
2140 		}
2141 		tmr = &net->rxt_timer;
2142 		break;
2143 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2144 		/*
2145 		 * Here we use the endpoints shutdown guard timer usually
2146 		 * about 3 minutes.
2147 		 */
2148 		if (stcb == NULL) {
2149 			return;
2150 		}
2151 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2152 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2153 		} else {
2154 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2155 		}
2156 		tmr = &stcb->asoc.shut_guard_timer;
2157 		break;
2158 	case SCTP_TIMER_TYPE_STRRESET:
2159 		/*
2160 		 * Here the timer comes from the stcb but its value is from
2161 		 * the net's RTO.
2162 		 */
2163 		if ((stcb == NULL) || (net == NULL)) {
2164 			return;
2165 		}
2166 		if (net->RTO == 0) {
2167 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2168 		} else {
2169 			to_ticks = MSEC_TO_TICKS(net->RTO);
2170 		}
2171 		tmr = &stcb->asoc.strreset_timer;
2172 		break;
2173 	case SCTP_TIMER_TYPE_ASCONF:
2174 		/*
2175 		 * Here the timer comes from the stcb but its value is from
2176 		 * the net's RTO.
2177 		 */
2178 		if ((stcb == NULL) || (net == NULL)) {
2179 			return;
2180 		}
2181 		if (net->RTO == 0) {
2182 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2183 		} else {
2184 			to_ticks = MSEC_TO_TICKS(net->RTO);
2185 		}
2186 		tmr = &stcb->asoc.asconf_timer;
2187 		break;
2188 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2189 		if ((stcb == NULL) || (net != NULL)) {
2190 			return;
2191 		}
2192 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2193 		tmr = &stcb->asoc.delete_prim_timer;
2194 		break;
2195 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2196 		if (stcb == NULL) {
2197 			return;
2198 		}
2199 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2200 			/*
2201 			 * Really an error since stcb is NOT set to
2202 			 * autoclose
2203 			 */
2204 			return;
2205 		}
2206 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2207 		tmr = &stcb->asoc.autoclose_timer;
2208 		break;
2209 	default:
2210 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2211 		    __func__, t_type);
2212 		return;
2213 		break;
2214 	}
2215 	if ((to_ticks <= 0) || (tmr == NULL)) {
2216 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2217 		    __func__, t_type, to_ticks, (void *)tmr);
2218 		return;
2219 	}
2220 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2221 		/*
2222 		 * we do NOT allow you to have it already running. if it is
2223 		 * we leave the current one up unchanged
2224 		 */
2225 		return;
2226 	}
2227 	/* At this point we can proceed */
2228 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2229 		stcb->asoc.num_send_timers_up++;
2230 	}
2231 	tmr->stopped_from = 0;
2232 	tmr->type = t_type;
2233 	tmr->ep = (void *)inp;
2234 	tmr->tcb = (void *)stcb;
2235 	tmr->net = (void *)net;
2236 	tmr->self = (void *)tmr;
2237 	tmr->vnet = (void *)curvnet;
2238 	tmr->ticks = sctp_get_tick_count();
2239 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2240 	return;
2241 }
2242 
/*
 * Stop the timer of type 't_type' for the given endpoint/association/
 * destination.  The sctp_timer instance is selected the same way as in
 * sctp_timer_start().  If the selected timer is shared between types
 * and is currently armed as a different type, it is left running.
 * 'from' records the caller's location for post-mortem debugging.
 * Callers must hold the TCB lock whenever an stcb is supplied.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address workqueue timer may be stopped without an inp. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Select the timer instance for this type; bail if the required
	 * stcb/net argument is missing. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __func__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* Keep the running send-timer count consistent; clamp at 0. */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	/*
	 * NOTE(review): clearing 'self' presumably lets the timeout
	 * handler detect a stopped timer -- confirm against
	 * sctp_timeout_handler().
	 */
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2399 
2400 uint32_t
2401 sctp_calculate_len(struct mbuf *m)
2402 {
2403 	uint32_t tlen = 0;
2404 	struct mbuf *at;
2405 
2406 	at = m;
2407 	while (at) {
2408 		tlen += SCTP_BUF_LEN(at);
2409 		at = SCTP_BUF_NEXT(at);
2410 	}
2411 	return (tlen);
2412 }
2413 
2414 void
2415 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2416     struct sctp_association *asoc, uint32_t mtu)
2417 {
2418 	/*
2419 	 * Reset the P-MTU size on this association, this involves changing
2420 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2421 	 * allow the DF flag to be cleared.
2422 	 */
2423 	struct sctp_tmit_chunk *chk;
2424 	unsigned int eff_mtu, ovh;
2425 
2426 	asoc->smallest_mtu = mtu;
2427 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2428 		ovh = SCTP_MIN_OVERHEAD;
2429 	} else {
2430 		ovh = SCTP_MIN_V4_OVERHEAD;
2431 	}
2432 	eff_mtu = mtu - ovh;
2433 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2434 		if (chk->send_size > eff_mtu) {
2435 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2436 		}
2437 	}
2438 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2439 		if (chk->send_size > eff_mtu) {
2440 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2441 		}
2442 	}
2443 }
2444 
2445 
2446 /*
2447  * given an association and starting time of the current RTT period return
2448  * RTO in number of msecs net should point to the current network
2449  */
2450 
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe, int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in 'told') return RTO in number of msecs.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	/*
	 * On strict-alignment platforms the caller may hand us a
	 * potentially misaligned timeval and asks for a local copy.
	 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* 'now' becomes the elapsed time since the RTT period started. */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
	        (uint64_t) now.tv_usec;

	/* compute rtt in ms */
	rtt = (int32_t) (net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* rtt becomes the (signed) error against the smoothed RTT. */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		/* Fold |error| into the smoothed variance. */
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	/* Never let the variance collapse to zero. */
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		/* Once we leave satellite mode, lock out re-entry. */
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2566 
2567 /*
2568  * return a pointer to a contiguous piece of data from the given mbuf chain
2569  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2570  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2571  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2572  */
2573 caddr_t
2574 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2575 {
2576 	uint32_t count;
2577 	uint8_t *ptr;
2578 
2579 	ptr = in_ptr;
2580 	if ((off < 0) || (len <= 0))
2581 		return (NULL);
2582 
2583 	/* find the desired start location */
2584 	while ((m != NULL) && (off > 0)) {
2585 		if (off < SCTP_BUF_LEN(m))
2586 			break;
2587 		off -= SCTP_BUF_LEN(m);
2588 		m = SCTP_BUF_NEXT(m);
2589 	}
2590 	if (m == NULL)
2591 		return (NULL);
2592 
2593 	/* is the current mbuf large enough (eg. contiguous)? */
2594 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2595 		return (mtod(m, caddr_t)+off);
2596 	} else {
2597 		/* else, it spans more than one mbuf, so save a temp copy... */
2598 		while ((m != NULL) && (len > 0)) {
2599 			count = min(SCTP_BUF_LEN(m) - off, len);
2600 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2601 			len -= count;
2602 			ptr += count;
2603 			off = 0;
2604 			m = SCTP_BUF_NEXT(m);
2605 		}
2606 		if ((m == NULL) && (len > 0))
2607 			return (NULL);
2608 		else
2609 			return ((caddr_t)in_ptr);
2610 	}
2611 }
2612 
2613 
2614 
2615 struct sctp_paramhdr *
2616 sctp_get_next_param(struct mbuf *m,
2617     int offset,
2618     struct sctp_paramhdr *pull,
2619     int pull_limit)
2620 {
2621 	/* This just provides a typed signature to Peter's Pull routine */
2622 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2623 	    (uint8_t *) pull));
2624 }
2625 
2626 
2627 struct mbuf *
2628 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2629 {
2630 	struct mbuf *m_last;
2631 	caddr_t dp;
2632 
2633 	if (padlen > 3) {
2634 		return (NULL);
2635 	}
2636 	if (padlen <= M_TRAILINGSPACE(m)) {
2637 		/*
2638 		 * The easy way. We hope the majority of the time we hit
2639 		 * here :)
2640 		 */
2641 		m_last = m;
2642 	} else {
2643 		/* Hard way we must grow the mbuf chain */
2644 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2645 		if (m_last == NULL) {
2646 			return (NULL);
2647 		}
2648 		SCTP_BUF_LEN(m_last) = 0;
2649 		SCTP_BUF_NEXT(m_last) = NULL;
2650 		SCTP_BUF_NEXT(m) = m_last;
2651 	}
2652 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2653 	SCTP_BUF_LEN(m_last) += padlen;
2654 	memset(dp, 0, padlen);
2655 	return (m_last);
2656 }
2657 
2658 struct mbuf *
2659 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2660 {
2661 	/* find the last mbuf in chain and pad it */
2662 	struct mbuf *m_at;
2663 
2664 	if (last_mbuf != NULL) {
2665 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2666 	} else {
2667 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2668 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2669 				return (sctp_add_pad_tombuf(m_at, padval));
2670 			}
2671 		}
2672 	}
2673 	return (NULL);
2674 }
2675 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification to the application (if it
 * enabled the event), set the socket error for 1-to-1 style sockets
 * when the association was lost or could not be started, and wake any
 * sleepers on the socket.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t abort_len;
	unsigned int i;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
		} else {
			abort_len = 0;
		}
		/* Reserve room for the optional trailing info bytes. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		/* Only the full-sized mbuf carries the optional info bytes. */
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				/* One sac_info byte per negotiated feature. */
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				if (stcb->asoc.idata_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				/* Attach the raw ABORT chunk for the user. */
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			/* not that we need this */
			control->tail_mbuf = m_notify;
			control->spec_flags = M_NOTIFICATION;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			/* Peer-initiated failure: refused vs. reset. */
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			/* Locally initiated failure: timeout vs. abort. */
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* Hold a ref across the unlock/relock to keep stcb alive. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore_locked(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2830 
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification for the peer address
 * 'sa' with the given state/error to the application, if it enabled
 * the event.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address, normalizing it for the application's view. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		/* V4-mapped sockets want the address in mapped form. */
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2922 
2923 
2924 static void
2925 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2926     struct sctp_tmit_chunk *chk, int so_locked
2927 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2928     SCTP_UNUSED
2929 #endif
2930 )
2931 {
2932 	struct mbuf *m_notify;
2933 	struct sctp_send_failed *ssf;
2934 	struct sctp_send_failed_event *ssfe;
2935 	struct sctp_queued_to_read *control;
2936 	int length;
2937 
2938 	if ((stcb == NULL) ||
2939 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2940 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2941 		/* event not enabled */
2942 		return;
2943 	}
2944 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2945 		length = sizeof(struct sctp_send_failed_event);
2946 	} else {
2947 		length = sizeof(struct sctp_send_failed);
2948 	}
2949 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
2950 	if (m_notify == NULL)
2951 		/* no space left */
2952 		return;
2953 	SCTP_BUF_LEN(m_notify) = 0;
2954 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2955 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2956 		memset(ssfe, 0, length);
2957 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2958 		if (sent) {
2959 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2960 		} else {
2961 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2962 		}
2963 		length += chk->send_size;
2964 		length -= sizeof(struct sctp_data_chunk);
2965 		ssfe->ssfe_length = length;
2966 		ssfe->ssfe_error = error;
2967 		/* not exactly what the user sent in, but should be close :) */
2968 		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
2969 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2970 		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
2971 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2972 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2973 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2974 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
2975 	} else {
2976 		ssf = mtod(m_notify, struct sctp_send_failed *);
2977 		memset(ssf, 0, length);
2978 		ssf->ssf_type = SCTP_SEND_FAILED;
2979 		if (sent) {
2980 			ssf->ssf_flags = SCTP_DATA_SENT;
2981 		} else {
2982 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2983 		}
2984 		length += chk->send_size;
2985 		length -= sizeof(struct sctp_data_chunk);
2986 		ssf->ssf_length = length;
2987 		ssf->ssf_error = error;
2988 		/* not exactly what the user sent in, but should be close :) */
2989 		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
2990 		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2991 		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2992 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2993 		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2994 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
2995 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2996 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
2997 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2998 	}
2999 	if (chk->data) {
3000 		/*
3001 		 * trim off the sctp chunk header(it should be there)
3002 		 */
3003 		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3004 			m_adj(chk->data, sizeof(struct sctp_data_chunk));
3005 			sctp_mbuf_crush(chk->data);
3006 			chk->send_size -= sizeof(struct sctp_data_chunk);
3007 		}
3008 	}
3009 	SCTP_BUF_NEXT(m_notify) = chk->data;
3010 	/* Steal off the mbuf */
3011 	chk->data = NULL;
3012 	/*
3013 	 * For this case, we check the actual socket buffer, since the assoc
3014 	 * is going away we don't want to overfill the socket buffer for a
3015 	 * non-reader
3016 	 */
3017 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3018 		sctp_m_freem(m_notify);
3019 		return;
3020 	}
3021 	/* append to socket */
3022 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3023 	    0, 0, stcb->asoc.context, 0, 0, 0,
3024 	    m_notify);
3025 	if (control == NULL) {
3026 		/* no memory */
3027 		sctp_m_freem(m_notify);
3028 		return;
3029 	}
3030 	control->spec_flags = M_NOTIFICATION;
3031 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3032 	    control,
3033 	    &stcb->sctp_socket->so_rcv, 1,
3034 	    SCTP_READ_LOCK_NOT_HELD,
3035 	    so_locked);
3036 }
3037 
3038 
/*
 * Queue a send-failed notification for a message that was still sitting
 * on the stream output queue (never chunked for transmission).  Depending
 * on which feature flag is enabled, the notification is formatted either
 * as the newer struct sctp_send_failed_event or the older
 * struct sctp_send_failed.  Ownership of sp->data is transferred to the
 * notification mbuf chain.  Silently a no-op if neither event flavor is
 * enabled or resources cannot be obtained.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* Size the header mbuf for whichever format is subscribed. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* Newer sctp_send_failed_event layout. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		/* Reported length covers the event header plus user data. */
		length += sp->length;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->stream;
		if (sp->some_taken) {
			/* Part of the message was already pulled off. */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		/* Older sctp_send_failed layout. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		length += sp->length;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* Dropping the notify frees the stolen user data too. */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3139 
3140 
3141 
3142 static void
3143 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3144 {
3145 	struct mbuf *m_notify;
3146 	struct sctp_adaptation_event *sai;
3147 	struct sctp_queued_to_read *control;
3148 
3149 	if ((stcb == NULL) ||
3150 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3151 		/* event not enabled */
3152 		return;
3153 	}
3154 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3155 	if (m_notify == NULL)
3156 		/* no space left */
3157 		return;
3158 	SCTP_BUF_LEN(m_notify) = 0;
3159 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3160 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3161 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3162 	sai->sai_flags = 0;
3163 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3164 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3165 	sai->sai_assoc_id = sctp_get_associd(stcb);
3166 
3167 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3168 	SCTP_BUF_NEXT(m_notify) = NULL;
3169 
3170 	/* append to socket */
3171 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3172 	    0, 0, stcb->asoc.context, 0, 0, 0,
3173 	    m_notify);
3174 	if (control == NULL) {
3175 		/* no memory */
3176 		sctp_m_freem(m_notify);
3177 		return;
3178 	}
3179 	control->length = SCTP_BUF_LEN(m_notify);
3180 	control->spec_flags = M_NOTIFICATION;
3181 	/* not that we need this */
3182 	control->tail_mbuf = m_notify;
3183 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3184 	    control,
3185 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3186 }
3187 
3188 /* This always must be called with the read-queue LOCKED in the INP */
3189 static void
3190 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3191     uint32_t val, int so_locked
3192 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3193     SCTP_UNUSED
3194 #endif
3195 )
3196 {
3197 	struct mbuf *m_notify;
3198 	struct sctp_pdapi_event *pdapi;
3199 	struct sctp_queued_to_read *control;
3200 	struct sockbuf *sb;
3201 
3202 	if ((stcb == NULL) ||
3203 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3204 		/* event not enabled */
3205 		return;
3206 	}
3207 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3208 		return;
3209 	}
3210 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3211 	if (m_notify == NULL)
3212 		/* no space left */
3213 		return;
3214 	SCTP_BUF_LEN(m_notify) = 0;
3215 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3216 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3217 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3218 	pdapi->pdapi_flags = 0;
3219 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3220 	pdapi->pdapi_indication = error;
3221 	pdapi->pdapi_stream = (val >> 16);
3222 	pdapi->pdapi_seq = (val & 0x0000ffff);
3223 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3224 
3225 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3226 	SCTP_BUF_NEXT(m_notify) = NULL;
3227 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3228 	    0, 0, stcb->asoc.context, 0, 0, 0,
3229 	    m_notify);
3230 	if (control == NULL) {
3231 		/* no memory */
3232 		sctp_m_freem(m_notify);
3233 		return;
3234 	}
3235 	control->spec_flags = M_NOTIFICATION;
3236 	control->length = SCTP_BUF_LEN(m_notify);
3237 	/* not that we need this */
3238 	control->tail_mbuf = m_notify;
3239 	control->held_length = 0;
3240 	control->length = 0;
3241 	sb = &stcb->sctp_socket->so_rcv;
3242 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3243 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3244 	}
3245 	sctp_sballoc(stcb, sb, m_notify);
3246 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3247 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3248 	}
3249 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3250 	control->end_added = 1;
3251 	if (stcb->asoc.control_pdapi)
3252 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3253 	else {
3254 		/* we really should not see this case */
3255 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3256 	}
3257 	if (stcb->sctp_ep && stcb->sctp_socket) {
3258 		/* This should always be the case */
3259 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3260 		struct socket *so;
3261 
3262 		so = SCTP_INP_SO(stcb->sctp_ep);
3263 		if (!so_locked) {
3264 			atomic_add_int(&stcb->asoc.refcnt, 1);
3265 			SCTP_TCB_UNLOCK(stcb);
3266 			SCTP_SOCKET_LOCK(so, 1);
3267 			SCTP_TCB_LOCK(stcb);
3268 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3269 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3270 				SCTP_SOCKET_UNLOCK(so, 1);
3271 				return;
3272 			}
3273 		}
3274 #endif
3275 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3276 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3277 		if (!so_locked) {
3278 			SCTP_SOCKET_UNLOCK(so, 1);
3279 		}
3280 #endif
3281 	}
3282 }
3283 
/*
 * Deliver an SCTP_SHUTDOWN_EVENT notification when a SHUTDOWN completes.
 * For one-to-one (TCP model) sockets, additionally mark the socket as
 * unable to send more data before queuing the event.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/* Lock ordering: drop the TCB lock before the socket lock. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* Socket closed while we were relocking; bail. */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3352 
3353 static void
3354 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3355     int so_locked
3356 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3357     SCTP_UNUSED
3358 #endif
3359 )
3360 {
3361 	struct mbuf *m_notify;
3362 	struct sctp_sender_dry_event *event;
3363 	struct sctp_queued_to_read *control;
3364 
3365 	if ((stcb == NULL) ||
3366 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3367 		/* event not enabled */
3368 		return;
3369 	}
3370 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3371 	if (m_notify == NULL) {
3372 		/* no space left */
3373 		return;
3374 	}
3375 	SCTP_BUF_LEN(m_notify) = 0;
3376 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3377 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3378 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3379 	event->sender_dry_flags = 0;
3380 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3381 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3382 
3383 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3384 	SCTP_BUF_NEXT(m_notify) = NULL;
3385 
3386 	/* append to socket */
3387 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3388 	    0, 0, stcb->asoc.context, 0, 0, 0,
3389 	    m_notify);
3390 	if (control == NULL) {
3391 		/* no memory */
3392 		sctp_m_freem(m_notify);
3393 		return;
3394 	}
3395 	control->length = SCTP_BUF_LEN(m_notify);
3396 	control->spec_flags = M_NOTIFICATION;
3397 	/* not that we need this */
3398 	control->tail_mbuf = m_notify;
3399 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3400 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3401 }
3402 
3403 
/*
 * Queue an SCTP_STREAM_CHANGE_EVENT notification reporting the new
 * number of inbound/outbound streams after a stream add/change.  When
 * the peer originated the request (asoc.peer_req_out set) and `flag`
 * is non-zero, the local user is not notified.
 */
void
sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_change_event *stradd;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
		/* event not enabled */
		return;
	}
	if ((stcb->asoc.peer_req_out) && flag) {
		/* Peer made the request, don't tell the local user */
		stcb->asoc.peer_req_out = 0;
		return;
	}
	stcb->asoc.peer_req_out = 0;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	stradd = mtod(m_notify, struct sctp_stream_change_event *);
	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
	stradd->strchange_flags = flag;
	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
	stradd->strchange_assoc_id = sctp_get_associd(stcb);
	stradd->strchange_instrms = numberin;
	stradd->strchange_outstrms = numberout;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3459 
3460 void
3461 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3462 {
3463 	struct mbuf *m_notify;
3464 	struct sctp_queued_to_read *control;
3465 	struct sctp_assoc_reset_event *strasoc;
3466 
3467 	if ((stcb == NULL) ||
3468 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3469 		/* event not enabled */
3470 		return;
3471 	}
3472 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3473 	if (m_notify == NULL)
3474 		/* no space left */
3475 		return;
3476 	SCTP_BUF_LEN(m_notify) = 0;
3477 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3478 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3479 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3480 	strasoc->assocreset_flags = flag;
3481 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3482 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3483 	strasoc->assocreset_local_tsn = sending_tsn;
3484 	strasoc->assocreset_remote_tsn = recv_tsn;
3485 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3486 	SCTP_BUF_NEXT(m_notify) = NULL;
3487 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3488 		/* no space */
3489 		sctp_m_freem(m_notify);
3490 		return;
3491 	}
3492 	/* append to socket */
3493 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3494 	    0, 0, stcb->asoc.context, 0, 0, 0,
3495 	    m_notify);
3496 	if (control == NULL) {
3497 		/* no memory */
3498 		sctp_m_freem(m_notify);
3499 		return;
3500 	}
3501 	control->spec_flags = M_NOTIFICATION;
3502 	control->length = SCTP_BUF_LEN(m_notify);
3503 	/* not that we need this */
3504 	control->tail_mbuf = m_notify;
3505 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3506 	    control,
3507 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3508 }
3509 
3510 
3511 
/*
 * Queue an SCTP_STREAM_RESET_EVENT notification listing the affected
 * stream identifiers.  `number_entries` may be zero, in which case no
 * stream list is appended.
 */
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	/* Event header plus one uint16_t per affected stream. */
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	memset(strreset, 0, len);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	strreset->strreset_flags = flag;
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	if (number_entries) {
		int i;

		/*
		 * NOTE(review): ntohs() here assumes the list entries are
		 * in network byte order — confirm against the callers in
		 * sctp_ulp_notify().
		 */
		for (i = 0; i < number_entries; i++) {
			strreset->strreset_stream_list[i] = ntohs(list[i]);
		}
	}
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3574 
3575 
/*
 * Queue an SCTP_REMOTE_ERROR notification, optionally including a copy
 * of the offending ERROR chunk (`chunk` may be NULL).  If the full-size
 * allocation fails, retry with just the fixed header and omit the chunk
 * data.
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t chunk_len;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		/* event not enabled */
		return;
	}
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
	} else {
		chunk_len = 0;
	}
	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	memset(sre, 0, notif_len);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	/* Only copy chunk data if the larger allocation succeeded. */
	if (notif_len > sizeof(struct sctp_remote_error)) {
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		/* not that we need this */
		control->tail_mbuf = m_notify;
		control->spec_flags = M_NOTIFICATION;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		/* no memory */
		sctp_m_freem(m_notify);
	}
}
3633 
3634 
/*
 * Central dispatcher for upper-layer (socket) notifications: translate a
 * SCTP_NOTIFY_* code plus its type-erased `data` argument into a call to
 * the appropriate sctp_notify_* helper.  Suppresses all notifications
 * once the socket is gone/closed or can't receive more data, and
 * suppresses interface (address) events while still in the COOKIE_WAIT /
 * COOKIE_ECHOED front states.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		return;
	}
	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is sent at most once per association. */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			/* Recurse to report that the peer does no AUTH. */
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			/* data is the affected sctp_nets for address events. */
			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* Message failed while still on the stream output queue. */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		/* NOTE: 'DELVIERY' spelling matches the constant as defined. */
		{
			uint32_t val;

			/* Packed (stream << 16) | seq value. */
			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			/* Aborted before the association ever came up. */
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		/* For stream-reset cases, `error` carries the entry count. */
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* For AUTH cases, `data` carries a key number, not a pointer. */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __func__, notification, notification);
		break;
	}			/* end switch */
}
3814 
/*
 * Abandon every chunk queued for transmission on this association: the
 * sent queue, the pending send queue, and each stream's output queue.
 * For every chunk that still carries data, the ULP is notified of the
 * failed (SENT/UNSENT/SPECIAL) datagram with the supplied error code
 * before the data is freed.
 *
 * holds_lock - non-zero if the caller already holds the TCB send lock.
 * so_locked  - passed through to the notification path (socket lock state).
 *
 * Does nothing if the TCB is NULL, already being freed, or its socket
 * is gone/closed.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* NR-acked chunks were already taken off the stream count */
			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* re-check: the notification path may have consumed the data */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3926 
/*
 * Notify the ULP that the association was aborted, reporting all
 * outstanding outbound data as failed first.
 *
 * from_peer - non-zero when the abort was received from the peer
 *             (REM_ABORTED) rather than generated locally (LOC_ABORTED).
 * error     - error cause passed up with each notification.
 * abort     - the ABORT chunk (may be NULL) handed to the notification.
 * so_locked - socket lock state, passed through to the notify paths.
 *
 * TCP-model sockets that are (or were) connected get the WAS_ABORTED
 * flag so a later accept/connect can report the failure.  Nothing is
 * reported if the socket is already gone or closed.
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
    struct sctp_abort_chunk *abort, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if (stcb == NULL) {
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, error, 1, so_locked);
	if (from_peer) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
	} else {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
	}
}
3956 
/*
 * Abort an association in response to an incoming packet: notify the
 * ULP (if we have a TCB), send an ABORT back to the peer, and free the
 * association.
 *
 * m/iphlen/src/dst/sh describe the packet that triggered the abort;
 * op_err is an optional error-cause mbuf chain included in the ABORT.
 * If stcb is NULL, only the ABORT is sent (with vtag 0 and the caller's
 * vrf_id); otherwise the association's peer vtag and vrf_id are used.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Acquire the socket lock before freeing; bump the refcnt
		 * and drop the TCB lock first to keep lock ordering.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* an established association is going away */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
4006 
4007 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Dump the per-association inbound and outbound TSN logs (debug aid,
 * only compiled under SCTP_ASOCLOG_OF_TSNS).  Each log is a circular
 * buffer of SCTP_TSN_LOG_SIZE entries; when it has wrapped, the oldest
 * entries (from the current index to the end) are printed first.
 *
 * NOTE(review): the body is guarded by "NOSIY_PRINTS", which looks like
 * a typo of "NOISY_PRINTS" — as written, the function compiles to a
 * no-op unless someone defines the misspelled macro.  Confirm intent
 * before changing, since fixing the spelling would enable this output.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* wrapped: print the older half (index..end) first */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		/* then the newer entries (0..index-1) */
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
4068 
4069 #endif
4070 
/*
 * Abort an existing association from the local side: notify the ULP,
 * send an ABORT chunk (built from op_err) to the peer, then free the
 * association.
 *
 * If stcb is NULL there is nothing to abort; in that case a pending
 * inpcb free is completed (if the socket is gone and no associations
 * remain) and we return.
 *
 * so_locked - non-zero if the caller already holds the socket lock
 *             (only meaningful on Apple / SCTP_SO_LOCK_TESTING builds).
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				/* finish tearing down the endpoint */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* an established association is going away */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Take the socket lock before the free; refcnt hold keeps the TCB
	 * alive while the TCB lock is dropped to respect lock ordering.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4132 
/*
 * Handle an out-of-the-blue (OOTB) packet — one for which no
 * association exists.  Walks the chunk list to decide on a response:
 *   - PACKET_DROPPED / ABORT / SHUTDOWN_COMPLETE: silently ignored.
 *   - SHUTDOWN_ACK: answer with SHUTDOWN_COMPLETE and return.
 *   - otherwise: send an ABORT (with 'cause'), unless the blackhole
 *     sysctl suppresses it (blackhole==1 only suppresses for packets
 *     containing an INIT).
 * Also completes a pending inpcb free if the socket is gone.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    mflowtype, mflowid, fibnum,
			    vrf_id, port);
			return;
		default:
			break;
		}
		/* advance to the next chunk (lengths are padded to 4 bytes) */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
		    mflowtype, mflowid, fibnum,
		    vrf_id, port);
	}
}
4198 
4199 /*
4200  * check the inbound datagram to make sure there is not an abort inside it,
4201  * if there is return 1, else return 0.
4202  */
/*
 * Scan the chunks of an inbound datagram for an ABORT chunk.
 * Returns 1 if an ABORT is present, 0 otherwise.  As a side effect,
 * if an INIT chunk is seen, *vtagfill is set to its initiate tag
 * (host byte order) so the caller can use the correct vtag.
 * The walk stops at the first chunk with an impossible (too short)
 * length, treating the packet as corrupt.
 */
int
sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
{
	struct sctp_chunkhdr *ch;
	struct sctp_init_chunk *init_chk, chunk_buf;
	int offset;
	unsigned int chk_length;

	offset = iphlen + sizeof(struct sctphdr);
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
	    (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* packet is probably corrupt */
			break;
		}
		/* we seem to be ok, is it an abort? */
		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
			/* yep, tell them */
			return (1);
		}
		if (ch->chunk_type == SCTP_INITIATION) {
			/* need to update the Vtag */
			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
			if (init_chk != NULL) {
				*vtagfill = ntohl(init_chk->init.initiate_tag);
			}
		}
		/* Nope, move to the next chunk */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	return (0);
}
4240 
4241 /*
4242  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4243  * set (i.e. it's 0) so, create this function to compare link local scopes
4244  */
4245 #ifdef INET6
4246 uint32_t
4247 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4248 {
4249 	struct sockaddr_in6 a, b;
4250 
4251 	/* save copies */
4252 	a = *addr1;
4253 	b = *addr2;
4254 
4255 	if (a.sin6_scope_id == 0)
4256 		if (sa6_recoverscope(&a)) {
4257 			/* can't get scope, so can't match */
4258 			return (0);
4259 		}
4260 	if (b.sin6_scope_id == 0)
4261 		if (sa6_recoverscope(&b)) {
4262 			/* can't get scope, so can't match */
4263 			return (0);
4264 		}
4265 	if (a.sin6_scope_id != b.sin6_scope_id)
4266 		return (0);
4267 
4268 	return (1);
4269 }
4270 
4271 /*
4272  * returns a sockaddr_in6 with embedded scope recovered and removed
4273  */
4274 struct sockaddr_in6 *
4275 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4276 {
4277 	/* check and strip embedded scope junk */
4278 	if (addr->sin6_family == AF_INET6) {
4279 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4280 			if (addr->sin6_scope_id == 0) {
4281 				*store = *addr;
4282 				if (!sa6_recoverscope(store)) {
4283 					/* use the recovered scope */
4284 					addr = store;
4285 				}
4286 			} else {
4287 				/* else, return the original "to" addr */
4288 				in6_clearscope(&addr->sin6_addr);
4289 			}
4290 		}
4291 	}
4292 	return (addr);
4293 }
4294 
4295 #endif
4296 
4297 /*
4298  * are the two addresses the same?  currently a "scopeless" check returns: 1
4299  * if same, 0 if not
4300  */
4301 int
4302 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4303 {
4304 
4305 	/* must be valid */
4306 	if (sa1 == NULL || sa2 == NULL)
4307 		return (0);
4308 
4309 	/* must be the same family */
4310 	if (sa1->sa_family != sa2->sa_family)
4311 		return (0);
4312 
4313 	switch (sa1->sa_family) {
4314 #ifdef INET6
4315 	case AF_INET6:
4316 		{
4317 			/* IPv6 addresses */
4318 			struct sockaddr_in6 *sin6_1, *sin6_2;
4319 
4320 			sin6_1 = (struct sockaddr_in6 *)sa1;
4321 			sin6_2 = (struct sockaddr_in6 *)sa2;
4322 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4323 			    sin6_2));
4324 		}
4325 #endif
4326 #ifdef INET
4327 	case AF_INET:
4328 		{
4329 			/* IPv4 addresses */
4330 			struct sockaddr_in *sin_1, *sin_2;
4331 
4332 			sin_1 = (struct sockaddr_in *)sa1;
4333 			sin_2 = (struct sockaddr_in *)sa2;
4334 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4335 		}
4336 #endif
4337 	default:
4338 		/* we don't do these... */
4339 		return (0);
4340 	}
4341 }
4342 
4343 void
4344 sctp_print_address(struct sockaddr *sa)
4345 {
4346 #ifdef INET6
4347 	char ip6buf[INET6_ADDRSTRLEN];
4348 
4349 #endif
4350 
4351 	switch (sa->sa_family) {
4352 #ifdef INET6
4353 	case AF_INET6:
4354 		{
4355 			struct sockaddr_in6 *sin6;
4356 
4357 			sin6 = (struct sockaddr_in6 *)sa;
4358 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4359 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4360 			    ntohs(sin6->sin6_port),
4361 			    sin6->sin6_scope_id);
4362 			break;
4363 		}
4364 #endif
4365 #ifdef INET
4366 	case AF_INET:
4367 		{
4368 			struct sockaddr_in *sin;
4369 			unsigned char *p;
4370 
4371 			sin = (struct sockaddr_in *)sa;
4372 			p = (unsigned char *)&sin->sin_addr;
4373 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4374 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4375 			break;
4376 		}
4377 #endif
4378 	default:
4379 		SCTP_PRINTF("?\n");
4380 		break;
4381 	}
4382 }
4383 
/*
 * Move all read-queue control structures belonging to 'stcb' from the
 * old inpcb's socket to the new one (the peeloff/accept path).  The
 * moved data is debited from the old socket's receive buffer and
 * credited to the new one.  'waitflags' is passed to sblock(); if the
 * old receive buffer cannot be locked, nothing is moved and the data
 * is left stranded on the old socket.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit each mbuf from the old socket's rcv buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit each mbuf to the new socket's rcv buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4459 
/*
 * Wake up any reader sleeping on the endpoint's socket after data was
 * queued.  Uses the zero-copy event when that feature is active,
 * otherwise a normal sorwakeup.  On Apple / lock-testing builds the
 * socket lock is taken first (with the TCB refcnt held across the
 * TCB-lock drop), and the wakeup is skipped if the socket went away
 * while the lock was being acquired.
 */
void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/* hold the TCB across the lock reshuffle */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* socket vanished while we waited */
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4502 
/*
 * Append a filled-in control structure to the endpoint's read queue
 * and account its mbuf chain against the socket buffer 'sb', so that
 * select()/read() see the data.  Zero-length mbufs are pruned from the
 * chain along the way; if the whole chain collapses to nothing, the
 * control is freed instead of being queued.  Wakes the reader at the
 * end.
 *
 * end                - non-zero marks the message as complete (end_added).
 * inp_read_lock_held - non-zero if the caller already holds the INP read lock.
 * so_locked          - socket lock state, passed to the wakeup path.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* reader is gone; drop everything instead of queueing */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		/* only user data (not notifications) counts as a receive */
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* walk the chain: drop empty mbufs, account the rest against sb */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
}
4600 
4601 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4602  *************ALTERNATE ROUTING CODE
4603  */
4604 
4605 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4606  *************ALTERNATE ROUTING CODE
4607  */
4608 
4609 struct mbuf *
4610 sctp_generate_cause(uint16_t code, char *info)
4611 {
4612 	struct mbuf *m;
4613 	struct sctp_gen_error_cause *cause;
4614 	size_t info_len;
4615 	uint16_t len;
4616 
4617 	if ((code == 0) || (info == NULL)) {
4618 		return (NULL);
4619 	}
4620 	info_len = strlen(info);
4621 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4622 		return (NULL);
4623 	}
4624 	len = (uint16_t) (sizeof(struct sctp_paramhdr) + info_len);
4625 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4626 	if (m != NULL) {
4627 		SCTP_BUF_LEN(m) = len;
4628 		cause = mtod(m, struct sctp_gen_error_cause *);
4629 		cause->code = htons(code);
4630 		cause->length = htons(len);
4631 		memcpy(cause->info, info, info_len);
4632 	}
4633 	return (m);
4634 }
4635 
4636 struct mbuf *
4637 sctp_generate_no_user_data_cause(uint32_t tsn)
4638 {
4639 	struct mbuf *m;
4640 	struct sctp_error_no_user_data *no_user_data_cause;
4641 	uint16_t len;
4642 
4643 	len = (uint16_t) sizeof(struct sctp_error_no_user_data);
4644 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4645 	if (m != NULL) {
4646 		SCTP_BUF_LEN(m) = len;
4647 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4648 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4649 		no_user_data_cause->cause.length = htons(len);
4650 		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4651 	}
4652 	return (m);
4653 }
4654 
4655 #ifdef SCTP_MBCNT_LOGGING
/*
 * Release the buffer-space accounting held by chunk tp1 (logging
 * version, compiled under SCTP_MBCNT_LOGGING): decrement the
 * association's chunks-on-out-queue count and total output queue size
 * by the chunk's book_size, and mirror the debit into the socket's
 * send-buffer count for TCP-model sockets.  Both counters saturate at
 * zero rather than underflowing.  No-op when the chunk has no data.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero to avoid unsigned underflow */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* TCP-model sockets also track the bytes in the socket sndbuf */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4687 
4688 #endif
4689 
4690 int
4691 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4692     uint8_t sent, int so_locked
4693 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4694     SCTP_UNUSED
4695 #endif
4696 )
4697 {
4698 	struct sctp_stream_out *strq;
4699 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4700 	struct sctp_stream_queue_pending *sp;
4701 	uint16_t stream = 0, seq = 0;
4702 	uint8_t foundeom = 0;
4703 	int ret_sz = 0;
4704 	int notdone;
4705 	int do_wakeup_routine = 0;
4706 
4707 	stream = tp1->rec.data.stream_number;
4708 	seq = tp1->rec.data.stream_seq;
4709 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4710 		stcb->asoc.abandoned_sent[0]++;
4711 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4712 		stcb->asoc.strmout[stream].abandoned_sent[0]++;
4713 #if defined(SCTP_DETAILED_STR_STATS)
4714 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4715 #endif
4716 	} else {
4717 		stcb->asoc.abandoned_unsent[0]++;
4718 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4719 		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
4720 #if defined(SCTP_DETAILED_STR_STATS)
4721 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4722 #endif
4723 	}
4724 	do {
4725 		ret_sz += tp1->book_size;
4726 		if (tp1->data != NULL) {
4727 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4728 				sctp_flight_size_decrease(tp1);
4729 				sctp_total_flight_decrease(stcb, tp1);
4730 			}
4731 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4732 			stcb->asoc.peers_rwnd += tp1->send_size;
4733 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4734 			if (sent) {
4735 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4736 			} else {
4737 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4738 			}
4739 			if (tp1->data) {
4740 				sctp_m_freem(tp1->data);
4741 				tp1->data = NULL;
4742 			}
4743 			do_wakeup_routine = 1;
4744 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4745 				stcb->asoc.sent_queue_cnt_removeable--;
4746 			}
4747 		}
4748 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4749 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4750 		    SCTP_DATA_NOT_FRAG) {
4751 			/* not frag'ed we ae done   */
4752 			notdone = 0;
4753 			foundeom = 1;
4754 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4755 			/* end of frag, we are done */
4756 			notdone = 0;
4757 			foundeom = 1;
4758 		} else {
4759 			/*
4760 			 * Its a begin or middle piece, we must mark all of
4761 			 * it
4762 			 */
4763 			notdone = 1;
4764 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4765 		}
4766 	} while (tp1 && notdone);
4767 	if (foundeom == 0) {
4768 		/*
4769 		 * The multi-part message was scattered across the send and
4770 		 * sent queue.
4771 		 */
4772 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4773 			if ((tp1->rec.data.stream_number != stream) ||
4774 			    (tp1->rec.data.stream_seq != seq)) {
4775 				break;
4776 			}
4777 			/*
4778 			 * save to chk in case we have some on stream out
4779 			 * queue. If so and we have an un-transmitted one we
4780 			 * don't have to fudge the TSN.
4781 			 */
4782 			chk = tp1;
4783 			ret_sz += tp1->book_size;
4784 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4785 			if (sent) {
4786 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4787 			} else {
4788 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4789 			}
4790 			if (tp1->data) {
4791 				sctp_m_freem(tp1->data);
4792 				tp1->data = NULL;
4793 			}
4794 			/* No flight involved here book the size to 0 */
4795 			tp1->book_size = 0;
4796 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4797 				foundeom = 1;
4798 			}
4799 			do_wakeup_routine = 1;
4800 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4801 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4802 			/*
4803 			 * on to the sent queue so we can wait for it to be
4804 			 * passed by.
4805 			 */
4806 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4807 			    sctp_next);
4808 			stcb->asoc.send_queue_cnt--;
4809 			stcb->asoc.sent_queue_cnt++;
4810 		}
4811 	}
4812 	if (foundeom == 0) {
4813 		/*
4814 		 * Still no eom found. That means there is stuff left on the
4815 		 * stream out queue.. yuck.
4816 		 */
4817 		SCTP_TCB_SEND_LOCK(stcb);
4818 		strq = &stcb->asoc.strmout[stream];
4819 		sp = TAILQ_FIRST(&strq->outqueue);
4820 		if (sp != NULL) {
4821 			sp->discard_rest = 1;
4822 			/*
4823 			 * We may need to put a chunk on the queue that
4824 			 * holds the TSN that would have been sent with the
4825 			 * LAST bit.
4826 			 */
4827 			if (chk == NULL) {
4828 				/* Yep, we have to */
4829 				sctp_alloc_a_chunk(stcb, chk);
4830 				if (chk == NULL) {
4831 					/*
4832 					 * we are hosed. All we can do is
4833 					 * nothing.. which will cause an
4834 					 * abort if the peer is paying
4835 					 * attention.
4836 					 */
4837 					goto oh_well;
4838 				}
4839 				memset(chk, 0, sizeof(*chk));
4840 				chk->rec.data.rcv_flags = 0;
4841 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4842 				chk->asoc = &stcb->asoc;
4843 				if (stcb->asoc.idata_supported == 0) {
4844 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4845 						chk->rec.data.stream_seq = 0;
4846 					} else {
4847 						chk->rec.data.stream_seq = strq->next_mid_ordered;
4848 					}
4849 				} else {
4850 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4851 						chk->rec.data.stream_seq = strq->next_mid_unordered;
4852 					} else {
4853 						chk->rec.data.stream_seq = strq->next_mid_ordered;
4854 					}
4855 				}
4856 				chk->rec.data.stream_number = sp->stream;
4857 				chk->rec.data.payloadtype = sp->ppid;
4858 				chk->rec.data.context = sp->context;
4859 				chk->flags = sp->act_flags;
4860 				chk->whoTo = NULL;
4861 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4862 				strq->chunks_on_queues++;
4863 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4864 				stcb->asoc.sent_queue_cnt++;
4865 				stcb->asoc.pr_sctp_cnt++;
4866 			}
4867 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4868 			if (stcb->asoc.idata_supported == 0) {
4869 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4870 					strq->next_mid_ordered++;
4871 				}
4872 			} else {
4873 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4874 					strq->next_mid_unordered++;
4875 				} else {
4876 					strq->next_mid_ordered++;
4877 				}
4878 			}
4879 	oh_well:
4880 			if (sp->data) {
4881 				/*
4882 				 * Pull any data to free up the SB and allow
4883 				 * sender to "add more" while we will throw
4884 				 * away :-)
4885 				 */
4886 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4887 				ret_sz += sp->length;
4888 				do_wakeup_routine = 1;
4889 				sp->some_taken = 1;
4890 				sctp_m_freem(sp->data);
4891 				sp->data = NULL;
4892 				sp->tail_mbuf = NULL;
4893 				sp->length = 0;
4894 			}
4895 		}
4896 		SCTP_TCB_SEND_UNLOCK(stcb);
4897 	}
4898 	if (do_wakeup_routine) {
4899 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4900 		struct socket *so;
4901 
4902 		so = SCTP_INP_SO(stcb->sctp_ep);
4903 		if (!so_locked) {
4904 			atomic_add_int(&stcb->asoc.refcnt, 1);
4905 			SCTP_TCB_UNLOCK(stcb);
4906 			SCTP_SOCKET_LOCK(so, 1);
4907 			SCTP_TCB_LOCK(stcb);
4908 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4909 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4910 				/* assoc was freed while we were unlocked */
4911 				SCTP_SOCKET_UNLOCK(so, 1);
4912 				return (ret_sz);
4913 			}
4914 		}
4915 #endif
4916 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4917 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4918 		if (!so_locked) {
4919 			SCTP_SOCKET_UNLOCK(so, 1);
4920 		}
4921 #endif
4922 	}
4923 	return (ret_sz);
4924 }
4925 
4926 /*
4927  * checks to see if the given address, sa, is one that is currently known by
4928  * the kernel note: can't distinguish the same address on multiple interfaces
4929  * and doesn't handle multiple addresses with different zone/scope id's note:
4930  * ifa_ifwithaddr() compares the entire sockaddr struct
4931  */
4932 struct sctp_ifa *
4933 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4934     int holds_lock)
4935 {
4936 	struct sctp_laddr *laddr;
4937 
4938 	if (holds_lock == 0) {
4939 		SCTP_INP_RLOCK(inp);
4940 	}
4941 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4942 		if (laddr->ifa == NULL)
4943 			continue;
4944 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4945 			continue;
4946 #ifdef INET
4947 		if (addr->sa_family == AF_INET) {
4948 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4949 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4950 				/* found him. */
4951 				if (holds_lock == 0) {
4952 					SCTP_INP_RUNLOCK(inp);
4953 				}
4954 				return (laddr->ifa);
4955 				break;
4956 			}
4957 		}
4958 #endif
4959 #ifdef INET6
4960 		if (addr->sa_family == AF_INET6) {
4961 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4962 			    &laddr->ifa->address.sin6)) {
4963 				/* found him. */
4964 				if (holds_lock == 0) {
4965 					SCTP_INP_RUNLOCK(inp);
4966 				}
4967 				return (laddr->ifa);
4968 				break;
4969 			}
4970 		}
4971 #endif
4972 	}
4973 	if (holds_lock == 0) {
4974 		SCTP_INP_RUNLOCK(inp);
4975 	}
4976 	return (NULL);
4977 }
4978 
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	/*
	 * Derive a 32-bit hash value from a network address for bucket
	 * selection in the VRF address hash table.  Unknown address
	 * families hash to 0.
	 */
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			uint32_t v4;

			v4 = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
			/* Fold the upper 16 bits into the lower 16. */
			return (v4 ^ (v4 >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t sum;

			sin6 = (struct sockaddr_in6 *)addr;
			/* Sum all four 32-bit words, then fold. */
			sum = sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3];
			return (sum ^ (sum >> 16));
		}
#endif
	default:
		return (0);
	}
}
5012 
5013 struct sctp_ifa *
5014 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5015 {
5016 	struct sctp_ifa *sctp_ifap;
5017 	struct sctp_vrf *vrf;
5018 	struct sctp_ifalist *hash_head;
5019 	uint32_t hash_of_addr;
5020 
5021 	if (holds_lock == 0)
5022 		SCTP_IPI_ADDR_RLOCK();
5023 
5024 	vrf = sctp_find_vrf(vrf_id);
5025 	if (vrf == NULL) {
5026 		if (holds_lock == 0)
5027 			SCTP_IPI_ADDR_RUNLOCK();
5028 		return (NULL);
5029 	}
5030 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5031 
5032 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5033 	if (hash_head == NULL) {
5034 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5035 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5036 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5037 		sctp_print_address(addr);
5038 		SCTP_PRINTF("No such bucket for address\n");
5039 		if (holds_lock == 0)
5040 			SCTP_IPI_ADDR_RUNLOCK();
5041 
5042 		return (NULL);
5043 	}
5044 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5045 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5046 			continue;
5047 #ifdef INET
5048 		if (addr->sa_family == AF_INET) {
5049 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5050 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5051 				/* found him. */
5052 				if (holds_lock == 0)
5053 					SCTP_IPI_ADDR_RUNLOCK();
5054 				return (sctp_ifap);
5055 				break;
5056 			}
5057 		}
5058 #endif
5059 #ifdef INET6
5060 		if (addr->sa_family == AF_INET6) {
5061 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5062 			    &sctp_ifap->address.sin6)) {
5063 				/* found him. */
5064 				if (holds_lock == 0)
5065 					SCTP_IPI_ADDR_RUNLOCK();
5066 				return (sctp_ifap);
5067 				break;
5068 			}
5069 		}
5070 #endif
5071 	}
5072 	if (holds_lock == 0)
5073 		SCTP_IPI_ADDR_RUNLOCK();
5074 	return (NULL);
5075 }
5076 
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/*
	 * The user has pulled data from the socket buffer; decide whether
	 * enough window has opened up to send a window-update SACK.
	 * 'freed_so_far' is the byte count freed since the last check and
	 * is consumed (reset to 0) here.  'hold_rlock' indicates the caller
	 * holds the INP read lock, which must be dropped before taking the
	 * TCB lock and re-taken on the way out.  'rwnd_req' is the minimum
	 * window growth that justifies sending an update.
	 */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the assoc cannot be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Accumulate the freed bytes and consume the caller's counter. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have we freed enough to take a look? */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window grew enough; send a SACK to advertise it. */
		if (hold_rlock) {
			/* Drop the read lock before taking the TCB lock. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-take the read lock if we dropped it for the caller. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5159 
5160 int
5161 sctp_sorecvmsg(struct socket *so,
5162     struct uio *uio,
5163     struct mbuf **mp,
5164     struct sockaddr *from,
5165     int fromlen,
5166     int *msg_flags,
5167     struct sctp_sndrcvinfo *sinfo,
5168     int filling_sinfo)
5169 {
5170 	/*
5171 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5172 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5173 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5174 	 * On the way out we may send out any combination of:
5175 	 * MSG_NOTIFICATION MSG_EOR
5176 	 *
5177 	 */
5178 	struct sctp_inpcb *inp = NULL;
5179 	int my_len = 0;
5180 	int cp_len = 0, error = 0;
5181 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5182 	struct mbuf *m = NULL;
5183 	struct sctp_tcb *stcb = NULL;
5184 	int wakeup_read_socket = 0;
5185 	int freecnt_applied = 0;
5186 	int out_flags = 0, in_flags = 0;
5187 	int block_allowed = 1;
5188 	uint32_t freed_so_far = 0;
5189 	uint32_t copied_so_far = 0;
5190 	int in_eeor_mode = 0;
5191 	int no_rcv_needed = 0;
5192 	uint32_t rwnd_req = 0;
5193 	int hold_sblock = 0;
5194 	int hold_rlock = 0;
5195 	ssize_t slen = 0;
5196 	uint32_t held_length = 0;
5197 	int sockbuf_lock = 0;
5198 
5199 	if (uio == NULL) {
5200 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5201 		return (EINVAL);
5202 	}
5203 	if (msg_flags) {
5204 		in_flags = *msg_flags;
5205 		if (in_flags & MSG_PEEK)
5206 			SCTP_STAT_INCR(sctps_read_peeks);
5207 	} else {
5208 		in_flags = 0;
5209 	}
5210 	slen = uio->uio_resid;
5211 
5212 	/* Pull in and set up our int flags */
5213 	if (in_flags & MSG_OOB) {
5214 		/* Out of band's NOT supported */
5215 		return (EOPNOTSUPP);
5216 	}
5217 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5218 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5219 		return (EINVAL);
5220 	}
5221 	if ((in_flags & (MSG_DONTWAIT
5222 	    | MSG_NBIO
5223 	    )) ||
5224 	    SCTP_SO_IS_NBIO(so)) {
5225 		block_allowed = 0;
5226 	}
5227 	/* setup the endpoint */
5228 	inp = (struct sctp_inpcb *)so->so_pcb;
5229 	if (inp == NULL) {
5230 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5231 		return (EFAULT);
5232 	}
5233 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5234 	/* Must be at least a MTU's worth */
5235 	if (rwnd_req < SCTP_MIN_RWND)
5236 		rwnd_req = SCTP_MIN_RWND;
5237 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5238 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5239 		sctp_misc_ints(SCTP_SORECV_ENTER,
5240 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t) uio->uio_resid);
5241 	}
5242 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5243 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5244 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t) uio->uio_resid);
5245 	}
5246 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5247 	if (error) {
5248 		goto release_unlocked;
5249 	}
5250 	sockbuf_lock = 1;
5251 restart:
5252 
5253 
5254 restart_nosblocks:
5255 	if (hold_sblock == 0) {
5256 		SOCKBUF_LOCK(&so->so_rcv);
5257 		hold_sblock = 1;
5258 	}
5259 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5260 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5261 		goto out;
5262 	}
5263 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5264 		if (so->so_error) {
5265 			error = so->so_error;
5266 			if ((in_flags & MSG_PEEK) == 0)
5267 				so->so_error = 0;
5268 			goto out;
5269 		} else {
5270 			if (so->so_rcv.sb_cc == 0) {
5271 				/* indicate EOF */
5272 				error = 0;
5273 				goto out;
5274 			}
5275 		}
5276 	}
5277 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5278 		/* we need to wait for data */
5279 		if ((so->so_rcv.sb_cc == 0) &&
5280 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5281 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5282 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5283 				/*
5284 				 * For active open side clear flags for
5285 				 * re-use passive open is blocked by
5286 				 * connect.
5287 				 */
5288 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5289 					/*
5290 					 * You were aborted, passive side
5291 					 * always hits here
5292 					 */
5293 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5294 					error = ECONNRESET;
5295 				}
5296 				so->so_state &= ~(SS_ISCONNECTING |
5297 				    SS_ISDISCONNECTING |
5298 				    SS_ISCONFIRMING |
5299 				    SS_ISCONNECTED);
5300 				if (error == 0) {
5301 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5302 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5303 						error = ENOTCONN;
5304 					}
5305 				}
5306 				goto out;
5307 			}
5308 		}
5309 		error = sbwait(&so->so_rcv);
5310 		if (error) {
5311 			goto out;
5312 		}
5313 		held_length = 0;
5314 		goto restart_nosblocks;
5315 	} else if (so->so_rcv.sb_cc == 0) {
5316 		if (so->so_error) {
5317 			error = so->so_error;
5318 			if ((in_flags & MSG_PEEK) == 0)
5319 				so->so_error = 0;
5320 		} else {
5321 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5322 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5323 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5324 					/*
5325 					 * For active open side clear flags
5326 					 * for re-use passive open is
5327 					 * blocked by connect.
5328 					 */
5329 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5330 						/*
5331 						 * You were aborted, passive
5332 						 * side always hits here
5333 						 */
5334 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5335 						error = ECONNRESET;
5336 					}
5337 					so->so_state &= ~(SS_ISCONNECTING |
5338 					    SS_ISDISCONNECTING |
5339 					    SS_ISCONFIRMING |
5340 					    SS_ISCONNECTED);
5341 					if (error == 0) {
5342 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5343 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5344 							error = ENOTCONN;
5345 						}
5346 					}
5347 					goto out;
5348 				}
5349 			}
5350 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5351 			error = EWOULDBLOCK;
5352 		}
5353 		goto out;
5354 	}
5355 	if (hold_sblock == 1) {
5356 		SOCKBUF_UNLOCK(&so->so_rcv);
5357 		hold_sblock = 0;
5358 	}
5359 	/* we possibly have data we can read */
5360 	/* sa_ignore FREED_MEMORY */
5361 	control = TAILQ_FIRST(&inp->read_queue);
5362 	if (control == NULL) {
5363 		/*
5364 		 * This could be happening since the appender did the
5365 		 * increment but as not yet did the tailq insert onto the
5366 		 * read_queue
5367 		 */
5368 		if (hold_rlock == 0) {
5369 			SCTP_INP_READ_LOCK(inp);
5370 		}
5371 		control = TAILQ_FIRST(&inp->read_queue);
5372 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5373 #ifdef INVARIANTS
5374 			panic("Huh, its non zero and nothing on control?");
5375 #endif
5376 			so->so_rcv.sb_cc = 0;
5377 		}
5378 		SCTP_INP_READ_UNLOCK(inp);
5379 		hold_rlock = 0;
5380 		goto restart;
5381 	}
5382 	if ((control->length == 0) &&
5383 	    (control->do_not_ref_stcb)) {
5384 		/*
5385 		 * Clean up code for freeing assoc that left behind a
5386 		 * pdapi.. maybe a peer in EEOR that just closed after
5387 		 * sending and never indicated a EOR.
5388 		 */
5389 		if (hold_rlock == 0) {
5390 			hold_rlock = 1;
5391 			SCTP_INP_READ_LOCK(inp);
5392 		}
5393 		control->held_length = 0;
5394 		if (control->data) {
5395 			/* Hmm there is data here .. fix */
5396 			struct mbuf *m_tmp;
5397 			int cnt = 0;
5398 
5399 			m_tmp = control->data;
5400 			while (m_tmp) {
5401 				cnt += SCTP_BUF_LEN(m_tmp);
5402 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5403 					control->tail_mbuf = m_tmp;
5404 					control->end_added = 1;
5405 				}
5406 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5407 			}
5408 			control->length = cnt;
5409 		} else {
5410 			/* remove it */
5411 			TAILQ_REMOVE(&inp->read_queue, control, next);
5412 			/* Add back any hiddend data */
5413 			sctp_free_remote_addr(control->whoFrom);
5414 			sctp_free_a_readq(stcb, control);
5415 		}
5416 		if (hold_rlock) {
5417 			hold_rlock = 0;
5418 			SCTP_INP_READ_UNLOCK(inp);
5419 		}
5420 		goto restart;
5421 	}
5422 	if ((control->length == 0) &&
5423 	    (control->end_added == 1)) {
5424 		/*
5425 		 * Do we also need to check for (control->pdapi_aborted ==
5426 		 * 1)?
5427 		 */
5428 		if (hold_rlock == 0) {
5429 			hold_rlock = 1;
5430 			SCTP_INP_READ_LOCK(inp);
5431 		}
5432 		TAILQ_REMOVE(&inp->read_queue, control, next);
5433 		if (control->data) {
5434 #ifdef INVARIANTS
5435 			panic("control->data not null but control->length == 0");
5436 #else
5437 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5438 			sctp_m_freem(control->data);
5439 			control->data = NULL;
5440 #endif
5441 		}
5442 		if (control->aux_data) {
5443 			sctp_m_free(control->aux_data);
5444 			control->aux_data = NULL;
5445 		}
5446 #ifdef INVARIANTS
5447 		if (control->on_strm_q) {
5448 			panic("About to free ctl:%p so:%p and its in %d",
5449 			    control, so, control->on_strm_q);
5450 		}
5451 #endif
5452 		sctp_free_remote_addr(control->whoFrom);
5453 		sctp_free_a_readq(stcb, control);
5454 		if (hold_rlock) {
5455 			hold_rlock = 0;
5456 			SCTP_INP_READ_UNLOCK(inp);
5457 		}
5458 		goto restart;
5459 	}
5460 	if (control->length == 0) {
5461 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5462 		    (filling_sinfo)) {
5463 			/* find a more suitable one then this */
5464 			ctl = TAILQ_NEXT(control, next);
5465 			while (ctl) {
5466 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5467 				    (ctl->some_taken ||
5468 				    (ctl->spec_flags & M_NOTIFICATION) ||
5469 				    ((ctl->do_not_ref_stcb == 0) &&
5470 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5471 				    ) {
5472 					/*-
5473 					 * If we have a different TCB next, and there is data
5474 					 * present. If we have already taken some (pdapi), OR we can
5475 					 * ref the tcb and no delivery as started on this stream, we
5476 					 * take it. Note we allow a notification on a different
5477 					 * assoc to be delivered..
5478 					 */
5479 					control = ctl;
5480 					goto found_one;
5481 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5482 					    (ctl->length) &&
5483 					    ((ctl->some_taken) ||
5484 					    ((ctl->do_not_ref_stcb == 0) &&
5485 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5486 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5487 					/*-
5488 					 * If we have the same tcb, and there is data present, and we
5489 					 * have the strm interleave feature present. Then if we have
5490 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5491 					 * not started a delivery for this stream, we can take it.
5492 					 * Note we do NOT allow a notificaiton on the same assoc to
5493 					 * be delivered.
5494 					 */
5495 					control = ctl;
5496 					goto found_one;
5497 				}
5498 				ctl = TAILQ_NEXT(ctl, next);
5499 			}
5500 		}
5501 		/*
5502 		 * if we reach here, not suitable replacement is available
5503 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5504 		 * into the our held count, and its time to sleep again.
5505 		 */
5506 		held_length = so->so_rcv.sb_cc;
5507 		control->held_length = so->so_rcv.sb_cc;
5508 		goto restart;
5509 	}
5510 	/* Clear the held length since there is something to read */
5511 	control->held_length = 0;
5512 	if (hold_rlock) {
5513 		SCTP_INP_READ_UNLOCK(inp);
5514 		hold_rlock = 0;
5515 	}
5516 found_one:
5517 	/*
5518 	 * If we reach here, control has a some data for us to read off.
5519 	 * Note that stcb COULD be NULL.
5520 	 */
5521 	control->some_taken++;
5522 	if (hold_sblock) {
5523 		SOCKBUF_UNLOCK(&so->so_rcv);
5524 		hold_sblock = 0;
5525 	}
5526 	stcb = control->stcb;
5527 	if (stcb) {
5528 		if ((control->do_not_ref_stcb == 0) &&
5529 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5530 			if (freecnt_applied == 0)
5531 				stcb = NULL;
5532 		} else if (control->do_not_ref_stcb == 0) {
5533 			/* you can't free it on me please */
5534 			/*
5535 			 * The lock on the socket buffer protects us so the
5536 			 * free code will stop. But since we used the
5537 			 * socketbuf lock and the sender uses the tcb_lock
5538 			 * to increment, we need to use the atomic add to
5539 			 * the refcnt
5540 			 */
5541 			if (freecnt_applied) {
5542 #ifdef INVARIANTS
5543 				panic("refcnt already incremented");
5544 #else
5545 				SCTP_PRINTF("refcnt already incremented?\n");
5546 #endif
5547 			} else {
5548 				atomic_add_int(&stcb->asoc.refcnt, 1);
5549 				freecnt_applied = 1;
5550 			}
5551 			/*
5552 			 * Setup to remember how much we have not yet told
5553 			 * the peer our rwnd has opened up. Note we grab the
5554 			 * value from the tcb from last time. Note too that
5555 			 * sack sending clears this when a sack is sent,
5556 			 * which is fine. Once we hit the rwnd_req, we then
5557 			 * will go to the sctp_user_rcvd() that will not
5558 			 * lock until it KNOWs it MUST send a WUP-SACK.
5559 			 */
5560 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5561 			stcb->freed_by_sorcv_sincelast = 0;
5562 		}
5563 	}
5564 	if (stcb &&
5565 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5566 	    control->do_not_ref_stcb == 0) {
5567 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5568 	}
5569 	/* First lets get off the sinfo and sockaddr info */
5570 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5571 		sinfo->sinfo_stream = control->sinfo_stream;
5572 		sinfo->sinfo_ssn = (uint16_t) control->sinfo_ssn;
5573 		sinfo->sinfo_flags = control->sinfo_flags;
5574 		sinfo->sinfo_ppid = control->sinfo_ppid;
5575 		sinfo->sinfo_context = control->sinfo_context;
5576 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5577 		sinfo->sinfo_tsn = control->sinfo_tsn;
5578 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5579 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5580 		nxt = TAILQ_NEXT(control, next);
5581 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5582 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5583 			struct sctp_extrcvinfo *s_extra;
5584 
5585 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5586 			if ((nxt) &&
5587 			    (nxt->length)) {
5588 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5589 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5590 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5591 				}
5592 				if (nxt->spec_flags & M_NOTIFICATION) {
5593 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5594 				}
5595 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5596 				s_extra->serinfo_next_length = nxt->length;
5597 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5598 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5599 				if (nxt->tail_mbuf != NULL) {
5600 					if (nxt->end_added) {
5601 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5602 					}
5603 				}
5604 			} else {
5605 				/*
5606 				 * we explicitly 0 this, since the memcpy
5607 				 * got some other things beyond the older
5608 				 * sinfo_ that is on the control's structure
5609 				 * :-D
5610 				 */
5611 				nxt = NULL;
5612 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5613 				s_extra->serinfo_next_aid = 0;
5614 				s_extra->serinfo_next_length = 0;
5615 				s_extra->serinfo_next_ppid = 0;
5616 				s_extra->serinfo_next_stream = 0;
5617 			}
5618 		}
5619 		/*
5620 		 * update off the real current cum-ack, if we have an stcb.
5621 		 */
5622 		if ((control->do_not_ref_stcb == 0) && stcb)
5623 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5624 		/*
5625 		 * mask off the high bits, we keep the actual chunk bits in
5626 		 * there.
5627 		 */
5628 		sinfo->sinfo_flags &= 0x00ff;
5629 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5630 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5631 		}
5632 	}
5633 #ifdef SCTP_ASOCLOG_OF_TSNS
5634 	{
5635 		int index, newindex;
5636 		struct sctp_pcbtsn_rlog *entry;
5637 
5638 		do {
5639 			index = inp->readlog_index;
5640 			newindex = index + 1;
5641 			if (newindex >= SCTP_READ_LOG_SIZE) {
5642 				newindex = 0;
5643 			}
5644 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5645 		entry = &inp->readlog[index];
5646 		entry->vtag = control->sinfo_assoc_id;
5647 		entry->strm = control->sinfo_stream;
5648 		entry->seq = control->sinfo_ssn;
5649 		entry->sz = control->length;
5650 		entry->flgs = control->sinfo_flags;
5651 	}
5652 #endif
5653 	if ((fromlen > 0) && (from != NULL)) {
5654 		union sctp_sockstore store;
5655 		size_t len;
5656 
5657 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5658 #ifdef INET6
5659 		case AF_INET6:
5660 			len = sizeof(struct sockaddr_in6);
5661 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5662 			store.sin6.sin6_port = control->port_from;
5663 			break;
5664 #endif
5665 #ifdef INET
5666 		case AF_INET:
5667 #ifdef INET6
5668 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5669 				len = sizeof(struct sockaddr_in6);
5670 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5671 				    &store.sin6);
5672 				store.sin6.sin6_port = control->port_from;
5673 			} else {
5674 				len = sizeof(struct sockaddr_in);
5675 				store.sin = control->whoFrom->ro._l_addr.sin;
5676 				store.sin.sin_port = control->port_from;
5677 			}
5678 #else
5679 			len = sizeof(struct sockaddr_in);
5680 			store.sin = control->whoFrom->ro._l_addr.sin;
5681 			store.sin.sin_port = control->port_from;
5682 #endif
5683 			break;
5684 #endif
5685 		default:
5686 			len = 0;
5687 			break;
5688 		}
5689 		memcpy(from, &store, min((size_t)fromlen, len));
5690 #ifdef INET6
5691 		{
5692 			struct sockaddr_in6 lsa6, *from6;
5693 
5694 			from6 = (struct sockaddr_in6 *)from;
5695 			sctp_recover_scope_mac(from6, (&lsa6));
5696 		}
5697 #endif
5698 	}
5699 	/* now copy out what data we can */
5700 	if (mp == NULL) {
5701 		/* copy out each mbuf in the chain up to length */
5702 get_more_data:
5703 		m = control->data;
5704 		while (m) {
5705 			/* Move out all we can */
5706 			cp_len = (int)uio->uio_resid;
5707 			my_len = (int)SCTP_BUF_LEN(m);
5708 			if (cp_len > my_len) {
5709 				/* not enough in this buf */
5710 				cp_len = my_len;
5711 			}
5712 			if (hold_rlock) {
5713 				SCTP_INP_READ_UNLOCK(inp);
5714 				hold_rlock = 0;
5715 			}
5716 			if (cp_len > 0)
5717 				error = uiomove(mtod(m, char *), cp_len, uio);
5718 			/* re-read */
5719 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5720 				goto release;
5721 			}
5722 			if ((control->do_not_ref_stcb == 0) && stcb &&
5723 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5724 				no_rcv_needed = 1;
5725 			}
5726 			if (error) {
5727 				/* error we are out of here */
5728 				goto release;
5729 			}
5730 			SCTP_INP_READ_LOCK(inp);
5731 			hold_rlock = 1;
5732 			if (cp_len == SCTP_BUF_LEN(m)) {
5733 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5734 				    (control->end_added)) {
5735 					out_flags |= MSG_EOR;
5736 					if ((control->do_not_ref_stcb == 0) &&
5737 					    (control->stcb != NULL) &&
5738 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5739 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5740 				}
5741 				if (control->spec_flags & M_NOTIFICATION) {
5742 					out_flags |= MSG_NOTIFICATION;
5743 				}
5744 				/* we ate up the mbuf */
5745 				if (in_flags & MSG_PEEK) {
5746 					/* just looking */
5747 					m = SCTP_BUF_NEXT(m);
5748 					copied_so_far += cp_len;
5749 				} else {
5750 					/* dispose of the mbuf */
5751 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5752 						sctp_sblog(&so->so_rcv,
5753 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5754 					}
5755 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5756 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5757 						sctp_sblog(&so->so_rcv,
5758 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5759 					}
5760 					copied_so_far += cp_len;
5761 					freed_so_far += cp_len;
5762 					freed_so_far += MSIZE;
5763 					atomic_subtract_int(&control->length, cp_len);
5764 					control->data = sctp_m_free(m);
5765 					m = control->data;
5766 					/*
5767 					 * been through it all, must hold sb
5768 					 * lock ok to null tail
5769 					 */
5770 					if (control->data == NULL) {
5771 #ifdef INVARIANTS
5772 						if ((control->end_added == 0) ||
5773 						    (TAILQ_NEXT(control, next) == NULL)) {
5774 							/*
5775 							 * If the end is not
5776 							 * added, OR the
5777 							 * next is NOT null
5778 							 * we MUST have the
5779 							 * lock.
5780 							 */
5781 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5782 								panic("Hmm we don't own the lock?");
5783 							}
5784 						}
5785 #endif
5786 						control->tail_mbuf = NULL;
5787 #ifdef INVARIANTS
5788 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5789 							panic("end_added, nothing left and no MSG_EOR");
5790 						}
5791 #endif
5792 					}
5793 				}
5794 			} else {
5795 				/* Do we need to trim the mbuf? */
5796 				if (control->spec_flags & M_NOTIFICATION) {
5797 					out_flags |= MSG_NOTIFICATION;
5798 				}
5799 				if ((in_flags & MSG_PEEK) == 0) {
5800 					SCTP_BUF_RESV_UF(m, cp_len);
5801 					SCTP_BUF_LEN(m) -= cp_len;
5802 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5803 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5804 					}
5805 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5806 					if ((control->do_not_ref_stcb == 0) &&
5807 					    stcb) {
5808 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5809 					}
5810 					copied_so_far += cp_len;
5811 					freed_so_far += cp_len;
5812 					freed_so_far += MSIZE;
5813 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5814 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5815 						    SCTP_LOG_SBRESULT, 0);
5816 					}
5817 					atomic_subtract_int(&control->length, cp_len);
5818 				} else {
5819 					copied_so_far += cp_len;
5820 				}
5821 			}
5822 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5823 				break;
5824 			}
5825 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5826 			    (control->do_not_ref_stcb == 0) &&
5827 			    (freed_so_far >= rwnd_req)) {
5828 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5829 			}
5830 		}		/* end while(m) */
5831 		/*
5832 		 * At this point we have looked at it all and we either have
5833 		 * a MSG_EOR/or read all the user wants... <OR>
5834 		 * control->length == 0.
5835 		 */
5836 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5837 			/* we are done with this control */
5838 			if (control->length == 0) {
5839 				if (control->data) {
5840 #ifdef INVARIANTS
5841 					panic("control->data not null at read eor?");
5842 #else
5843 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5844 					sctp_m_freem(control->data);
5845 					control->data = NULL;
5846 #endif
5847 				}
5848 		done_with_control:
5849 				if (hold_rlock == 0) {
5850 					SCTP_INP_READ_LOCK(inp);
5851 					hold_rlock = 1;
5852 				}
5853 				TAILQ_REMOVE(&inp->read_queue, control, next);
5854 				/* Add back any hiddend data */
5855 				if (control->held_length) {
5856 					held_length = 0;
5857 					control->held_length = 0;
5858 					wakeup_read_socket = 1;
5859 				}
5860 				if (control->aux_data) {
5861 					sctp_m_free(control->aux_data);
5862 					control->aux_data = NULL;
5863 				}
5864 				no_rcv_needed = control->do_not_ref_stcb;
5865 				sctp_free_remote_addr(control->whoFrom);
5866 				control->data = NULL;
5867 #ifdef INVARIANTS
5868 				if (control->on_strm_q) {
5869 					panic("About to free ctl:%p so:%p and its in %d",
5870 					    control, so, control->on_strm_q);
5871 				}
5872 #endif
5873 				sctp_free_a_readq(stcb, control);
5874 				control = NULL;
5875 				if ((freed_so_far >= rwnd_req) &&
5876 				    (no_rcv_needed == 0))
5877 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5878 
5879 			} else {
5880 				/*
5881 				 * The user did not read all of this
5882 				 * message, turn off the returned MSG_EOR
5883 				 * since we are leaving more behind on the
5884 				 * control to read.
5885 				 */
5886 #ifdef INVARIANTS
5887 				if (control->end_added &&
5888 				    (control->data == NULL) &&
5889 				    (control->tail_mbuf == NULL)) {
5890 					panic("Gak, control->length is corrupt?");
5891 				}
5892 #endif
5893 				no_rcv_needed = control->do_not_ref_stcb;
5894 				out_flags &= ~MSG_EOR;
5895 			}
5896 		}
5897 		if (out_flags & MSG_EOR) {
5898 			goto release;
5899 		}
5900 		if ((uio->uio_resid == 0) ||
5901 		    ((in_eeor_mode) &&
5902 		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5903 			goto release;
5904 		}
5905 		/*
5906 		 * If I hit here the receiver wants more and this message is
5907 		 * NOT done (pd-api). So two questions. Can we block? if not
5908 		 * we are done. Did the user NOT set MSG_WAITALL?
5909 		 */
5910 		if (block_allowed == 0) {
5911 			goto release;
5912 		}
5913 		/*
5914 		 * We need to wait for more data a few things: - We don't
5915 		 * sbunlock() so we don't get someone else reading. - We
5916 		 * must be sure to account for the case where what is added
5917 		 * is NOT to our control when we wakeup.
5918 		 */
5919 
5920 		/*
5921 		 * Do we need to tell the transport a rwnd update might be
5922 		 * needed before we go to sleep?
5923 		 */
5924 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5925 		    ((freed_so_far >= rwnd_req) &&
5926 		    (control->do_not_ref_stcb == 0) &&
5927 		    (no_rcv_needed == 0))) {
5928 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5929 		}
5930 wait_some_more:
5931 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5932 			goto release;
5933 		}
5934 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5935 			goto release;
5936 
5937 		if (hold_rlock == 1) {
5938 			SCTP_INP_READ_UNLOCK(inp);
5939 			hold_rlock = 0;
5940 		}
5941 		if (hold_sblock == 0) {
5942 			SOCKBUF_LOCK(&so->so_rcv);
5943 			hold_sblock = 1;
5944 		}
5945 		if ((copied_so_far) && (control->length == 0) &&
5946 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5947 			goto release;
5948 		}
5949 		if (so->so_rcv.sb_cc <= control->held_length) {
5950 			error = sbwait(&so->so_rcv);
5951 			if (error) {
5952 				goto release;
5953 			}
5954 			control->held_length = 0;
5955 		}
5956 		if (hold_sblock) {
5957 			SOCKBUF_UNLOCK(&so->so_rcv);
5958 			hold_sblock = 0;
5959 		}
5960 		if (control->length == 0) {
5961 			/* still nothing here */
5962 			if (control->end_added == 1) {
5963 				/* he aborted, or is done i.e.did a shutdown */
5964 				out_flags |= MSG_EOR;
5965 				if (control->pdapi_aborted) {
5966 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5967 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5968 
5969 					out_flags |= MSG_TRUNC;
5970 				} else {
5971 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5972 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5973 				}
5974 				goto done_with_control;
5975 			}
5976 			if (so->so_rcv.sb_cc > held_length) {
5977 				control->held_length = so->so_rcv.sb_cc;
5978 				held_length = 0;
5979 			}
5980 			goto wait_some_more;
5981 		} else if (control->data == NULL) {
5982 			/*
5983 			 * we must re-sync since data is probably being
5984 			 * added
5985 			 */
5986 			SCTP_INP_READ_LOCK(inp);
5987 			if ((control->length > 0) && (control->data == NULL)) {
5988 				/*
5989 				 * big trouble.. we have the lock and its
5990 				 * corrupt?
5991 				 */
5992 #ifdef INVARIANTS
5993 				panic("Impossible data==NULL length !=0");
5994 #endif
5995 				out_flags |= MSG_EOR;
5996 				out_flags |= MSG_TRUNC;
5997 				control->length = 0;
5998 				SCTP_INP_READ_UNLOCK(inp);
5999 				goto done_with_control;
6000 			}
6001 			SCTP_INP_READ_UNLOCK(inp);
6002 			/* We will fall around to get more data */
6003 		}
6004 		goto get_more_data;
6005 	} else {
6006 		/*-
6007 		 * Give caller back the mbuf chain,
6008 		 * store in uio_resid the length
6009 		 */
6010 		wakeup_read_socket = 0;
6011 		if ((control->end_added == 0) ||
6012 		    (TAILQ_NEXT(control, next) == NULL)) {
6013 			/* Need to get rlock */
6014 			if (hold_rlock == 0) {
6015 				SCTP_INP_READ_LOCK(inp);
6016 				hold_rlock = 1;
6017 			}
6018 		}
6019 		if (control->end_added) {
6020 			out_flags |= MSG_EOR;
6021 			if ((control->do_not_ref_stcb == 0) &&
6022 			    (control->stcb != NULL) &&
6023 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6024 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6025 		}
6026 		if (control->spec_flags & M_NOTIFICATION) {
6027 			out_flags |= MSG_NOTIFICATION;
6028 		}
6029 		uio->uio_resid = control->length;
6030 		*mp = control->data;
6031 		m = control->data;
6032 		while (m) {
6033 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6034 				sctp_sblog(&so->so_rcv,
6035 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6036 			}
6037 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6038 			freed_so_far += SCTP_BUF_LEN(m);
6039 			freed_so_far += MSIZE;
6040 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6041 				sctp_sblog(&so->so_rcv,
6042 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6043 			}
6044 			m = SCTP_BUF_NEXT(m);
6045 		}
6046 		control->data = control->tail_mbuf = NULL;
6047 		control->length = 0;
6048 		if (out_flags & MSG_EOR) {
6049 			/* Done with this control */
6050 			goto done_with_control;
6051 		}
6052 	}
6053 release:
6054 	if (hold_rlock == 1) {
6055 		SCTP_INP_READ_UNLOCK(inp);
6056 		hold_rlock = 0;
6057 	}
6058 	if (hold_sblock == 1) {
6059 		SOCKBUF_UNLOCK(&so->so_rcv);
6060 		hold_sblock = 0;
6061 	}
6062 	sbunlock(&so->so_rcv);
6063 	sockbuf_lock = 0;
6064 
6065 release_unlocked:
6066 	if (hold_sblock) {
6067 		SOCKBUF_UNLOCK(&so->so_rcv);
6068 		hold_sblock = 0;
6069 	}
6070 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6071 		if ((freed_so_far >= rwnd_req) &&
6072 		    (control && (control->do_not_ref_stcb == 0)) &&
6073 		    (no_rcv_needed == 0))
6074 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6075 	}
6076 out:
6077 	if (msg_flags) {
6078 		*msg_flags = out_flags;
6079 	}
6080 	if (((out_flags & MSG_EOR) == 0) &&
6081 	    ((in_flags & MSG_PEEK) == 0) &&
6082 	    (sinfo) &&
6083 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6084 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6085 		struct sctp_extrcvinfo *s_extra;
6086 
6087 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6088 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6089 	}
6090 	if (hold_rlock == 1) {
6091 		SCTP_INP_READ_UNLOCK(inp);
6092 	}
6093 	if (hold_sblock) {
6094 		SOCKBUF_UNLOCK(&so->so_rcv);
6095 	}
6096 	if (sockbuf_lock) {
6097 		sbunlock(&so->so_rcv);
6098 	}
6099 	if (freecnt_applied) {
6100 		/*
6101 		 * The lock on the socket buffer protects us so the free
6102 		 * code will stop. But since we used the socketbuf lock and
6103 		 * the sender uses the tcb_lock to increment, we need to use
6104 		 * the atomic add to the refcnt.
6105 		 */
6106 		if (stcb == NULL) {
6107 #ifdef INVARIANTS
6108 			panic("stcb for refcnt has gone NULL?");
6109 			goto stage_left;
6110 #else
6111 			goto stage_left;
6112 #endif
6113 		}
6114 		atomic_add_int(&stcb->asoc.refcnt, -1);
6115 		/* Save the value back for next time */
6116 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6117 	}
6118 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6119 		if (stcb) {
6120 			sctp_misc_ints(SCTP_SORECV_DONE,
6121 			    freed_so_far,
6122 			    (uint32_t) ((uio) ? (slen - uio->uio_resid) : slen),
6123 			    stcb->asoc.my_rwnd,
6124 			    so->so_rcv.sb_cc);
6125 		} else {
6126 			sctp_misc_ints(SCTP_SORECV_DONE,
6127 			    freed_so_far,
6128 			    (uint32_t) ((uio) ? (slen - uio->uio_resid) : slen),
6129 			    0,
6130 			    so->so_rcv.sb_cc);
6131 		}
6132 	}
6133 stage_left:
6134 	if (wakeup_read_socket) {
6135 		sctp_sorwakeup(inp, so);
6136 	}
6137 	return (error);
6138 }
6139 
6140 
6141 #ifdef SCTP_MBUF_LOGGING
6142 struct mbuf *
6143 sctp_m_free(struct mbuf *m)
6144 {
6145 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6146 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6147 	}
6148 	return (m_free(m));
6149 }
6150 
6151 void
6152 sctp_m_freem(struct mbuf *mb)
6153 {
6154 	while (mb != NULL)
6155 		mb = sctp_m_free(mb);
6156 }
6157 
6158 #endif
6159 
/*
 * Request a peer-set-primary for every association holding the given
 * local address.  The work is not performed inline: a work-queue item
 * is queued for the address iterator, which carries out the signalling
 * asynchronously.  Returns 0 on success or an errno value.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;	/* work-queue item handed to the iterator */

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* Hold a reference on the ifa for the queued work item. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_WQ_ADDR_UNLOCK();
	/* Kick the address work-queue timer so the item gets processed. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}
6206 
6207 
/*
 * Protocol-switch receive entry point for SCTP sockets.
 *
 * Thin wrapper around sctp_sorecvmsg(): decides whether per-message
 * sndrcv info should be collected (filling_sinfo), supplies scratch
 * storage for the peer address, and converts the results into the
 * forms the socket layer expects — a control-message chain for
 * *controlp and a malloc'ed sockaddr for *psa.
 */
int
sctp_soreceive(struct socket *so,
    struct sockaddr **psa,
    struct uio *uio,
    struct mbuf **mp0,
    struct mbuf **controlp,
    int *flagsp)
{
	int error, fromlen;
	uint8_t sockbuf[256];	/* scratch space for the peer's sockaddr */
	struct sockaddr *from;
	struct sctp_extrcvinfo sinfo;
	int filling_sinfo = 1;
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	/* pickup the assoc we are reading from */
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
	    (controlp == NULL)) {
		/* user does not want the sndrcv ctl */
		filling_sinfo = 0;
	}
	if (psa) {
		from = (struct sockaddr *)sockbuf;
		fromlen = sizeof(sockbuf);
		/* sa_len == 0 marks "no address copied back yet" */
		from->sa_len = 0;
	} else {
		from = NULL;
		fromlen = 0;
	}

	if (filling_sinfo) {
		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
	}
	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
	if (controlp != NULL) {
		/* copy back the sinfo in a CMSG format */
		if (filling_sinfo)
			*controlp = sctp_build_ctl_nchunk(inp,
			    (struct sctp_sndrcvinfo *)&sinfo);
		else
			*controlp = NULL;
	}
	if (psa) {
		/* copy back the address info */
		if (from && from->sa_len) {
			*psa = sodupsockaddr(from, M_NOWAIT);
		} else {
			*psa = NULL;
		}
	}
	return (error);
}
6268 
6269 
6270 
6271 
6272 
/*
 * Add each address in the packed sockaddr array 'addr' (totaddr
 * entries) to association 'stcb' as a confirmed remote address.
 * Returns the number of addresses added.  On an invalid address or
 * allocation failure the association is freed via sctp_free_assoc(),
 * *error is set, and the count added so far is returned — the caller
 * must not touch stcb when *error is non-zero.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* reject wildcard, broadcast and multicast peers */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* reject unspecified and multicast peers */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): an unknown family leaves 'incr' at
			 * its previous value, so 'sa' does not advance past
			 * the entry; presumably the caller pre-validated and
			 * truncated the list (sctp_connectx_helper_find) —
			 * confirm before relying on this path.
			 */
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6356 
6357 struct sctp_tcb *
6358 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6359     unsigned int *totaddr,
6360     unsigned int *num_v4, unsigned int *num_v6, int *error,
6361     unsigned int limit, int *bad_addr)
6362 {
6363 	struct sockaddr *sa;
6364 	struct sctp_tcb *stcb = NULL;
6365 	unsigned int incr, at, i;
6366 
6367 	at = incr = 0;
6368 	sa = addr;
6369 	*error = *num_v6 = *num_v4 = 0;
6370 	/* account and validate addresses */
6371 	for (i = 0; i < *totaddr; i++) {
6372 		switch (sa->sa_family) {
6373 #ifdef INET
6374 		case AF_INET:
6375 			if (sa->sa_len != incr) {
6376 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6377 				*error = EINVAL;
6378 				*bad_addr = 1;
6379 				return (NULL);
6380 			}
6381 			(*num_v4) += 1;
6382 			incr = (unsigned int)sizeof(struct sockaddr_in);
6383 			break;
6384 #endif
6385 #ifdef INET6
6386 		case AF_INET6:
6387 			{
6388 				struct sockaddr_in6 *sin6;
6389 
6390 				sin6 = (struct sockaddr_in6 *)sa;
6391 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6392 					/* Must be non-mapped for connectx */
6393 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6394 					*error = EINVAL;
6395 					*bad_addr = 1;
6396 					return (NULL);
6397 				}
6398 				if (sa->sa_len != incr) {
6399 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6400 					*error = EINVAL;
6401 					*bad_addr = 1;
6402 					return (NULL);
6403 				}
6404 				(*num_v6) += 1;
6405 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6406 				break;
6407 			}
6408 #endif
6409 		default:
6410 			*totaddr = i;
6411 			/* we are done */
6412 			break;
6413 		}
6414 		if (i == *totaddr) {
6415 			break;
6416 		}
6417 		SCTP_INP_INCR_REF(inp);
6418 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6419 		if (stcb != NULL) {
6420 			/* Already have or am bring up an association */
6421 			return (stcb);
6422 		} else {
6423 			SCTP_INP_DECR_REF(inp);
6424 		}
6425 		if ((at + incr) > limit) {
6426 			*totaddr = i;
6427 			break;
6428 		}
6429 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6430 	}
6431 	return ((struct sctp_tcb *)NULL);
6432 }
6433 
6434 /*
6435  * sctp_bindx(ADD) for one address.
6436  * assumes all arguments are valid/checked by caller.
6437  */
/*
 * Bind one additional local address to endpoint 'inp' on behalf of
 * sctp_bindx(SCTP_BINDX_ADD_ADDR).  Validates family/length against the
 * socket's capabilities, converts a v4-mapped v6 address to plain v4
 * where allowed, and either performs the initial bind (if the endpoint
 * is still unbound) or adds the address via sctp_addr_mgmt_ep_sa().
 * Errors are reported through *error; callers pre-validate arguments.
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;	/* holds a converted v4-mapped address */

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* bind the embedded v4 address instead */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* Endpoint not yet bound at all: this is the first bind. */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/*
		 * validate the incoming port; NOTE(review): the cast to
		 * sockaddr_in is used for the port field of both families —
		 * presumably safe because sin_port and sin6_port share the
		 * same offset; confirm against the sockaddr definitions.
		 */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* address is free: clear the port and add it */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* another endpoint owns this address+port */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6565 
6566 /*
6567  * sctp_bindx(DELETE) for one address.
6568  * assumes all arguments are valid/checked by caller.
6569  */
/*
 * Remove one bound local address from endpoint 'inp' on behalf of
 * sctp_bindx(SCTP_BINDX_REM_ADDR).  Performs the same family/length
 * validation as the add path, converts a v4-mapped v6 address to plain
 * v4 where allowed, then deletes the address via sctp_addr_mgmt_ep_sa().
 * Errors are reported through *error; callers pre-validate arguments.
 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;	/* holds a converted v4-mapped address */

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* delete the embedded v4 address instead */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6655 
6656 /*
6657  * returns the valid local address count for an assoc, taking into account
6658  * all scoping rules
6659  */
/*
 * Return the number of local addresses that are valid for association
 * 'stcb', applying all scoping rules (loopback, private-v4, link-local
 * and site-local v6) plus the per-association restricted-address list.
 * For a bound-all endpoint every interface address on the VRF is
 * considered; otherwise only the endpoint's explicit address list.
 * Takes the address-info read lock for the duration of the walk.
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope;

#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;

#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;

#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* skip addresses outside our jail */
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						/* skip addresses outside our jail */
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad link-local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6805 
6806 #if defined(SCTP_LOCAL_TRACE_BUF)
6807 
/*
 * Append one entry to the global SCTP trace ring buffer.
 *
 * The slot index is claimed lock-free with an atomic compare-and-swap:
 * the CAS loop retries until this thread successfully advances the
 * shared index, guaranteeing each writer a distinct slot.  The index
 * runs 1..SCTP_MAX_LOGGING_SIZE and wraps back through slot 0.  The
 * entry fields themselves are filled in without further
 * synchronization, so a slot can be overwritten by a racing writer
 * after a full wrap — acceptable for a debug trace facility.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* claim a slot: retry until our CAS advances the shared index */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* wrap: the observed index past the end maps to slot 0 */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6833 
6834 #endif
/*
 * Callback for SCTP-over-UDP encapsulation: 'm' starts at the IP
 * header and 'off' is the offset of the UDP header within it.  The UDP
 * header is cut out of the mbuf chain, the IP/IPv6 payload length is
 * adjusted accordingly, and the packet is handed to the normal SCTP
 * input path along with the UDP source port (needed for replies).
 * The mbuf chain is always consumed, either by the input path or by
 * m_freem() on error.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	struct ip *iph;

#ifdef INET6
	struct ip6_hdr *ip6;

#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain: splice sp after the IP header. */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	/*
	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
	 * checksum and it was valid. Since CSUM_DATA_VALID ==
	 * CSUM_SCTP_VALID this would imply that the HW also verified the
	 * SCTP checksum. Therefore, clear the bit.
	 */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* shrink the IP total length by the removed UDP header */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		/* shrink the IPv6 payload length by the removed UDP header */
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6919 
6920 #ifdef INET
/*
 * ICMP error handler for the IPv4 UDP tunneling socket.  'vip' points at
 * the copy of the inner IP header quoted in the ICMP message; the ICMP
 * header and the outer IP header sit immediately in front of it, so we
 * recover them by backing up the pointer.  The quoted packet is one we
 * sent, therefore the association lookup reverses source and
 * destination.  The UDP ports and the SCTP verification tag are checked
 * before acting, so spoofed or stale ICMP messages are ignored, and the
 * error is then handed to sctp_notify().
 */
static void
sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
{
	struct ip *outer_ip, *inner_ip;
	struct sctphdr *sh;
	struct icmp *icmp;
	struct udphdr *udp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_init_chunk *ch;
	struct sockaddr_in src, dst;
	uint8_t type, code;

	inner_ip = (struct ip *)vip;
	/* The ICMP header precedes the quoted inner IP header in memory. */
	icmp = (struct icmp *)((caddr_t)inner_ip -
	    (sizeof(struct icmp) - sizeof(struct ip)));
	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
	/*
	 * Bail out unless the ICMP message quotes at least the full UDP
	 * header plus the first 8 bytes of the SCTP common header (ports
	 * and verification tag); the leading 8 is the ICMP header itself.
	 */
	if (ntohs(outer_ip->ip_len) <
	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
		return;
	}
	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
	sh = (struct sctphdr *)(udp + 1);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
	src.sin_len = sizeof(struct sockaddr_in);
	src.sin_port = sh->src_port;
	src.sin_addr = inner_ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(struct sockaddr_in);
	dst.sin_port = sh->dest_port;
	dst.sin_addr = inner_ip->ip_dst;
	/*
	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
	 * holds our local endpoint address. Thus we reverse the dst and the
	 * src in the lookup.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1,
	    SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL) &&
	    (inp->sctp_socket != NULL)) {
		/* Check the UDP port numbers */
		if ((udp->uh_dport != net->port) ||
		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh->v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			/*
			 * Zero vtag: only an INIT may legitimately carry it.
			 * The extra 8 + 20 covers the UDP header plus SCTP
			 * common header, chunk header and initiate tag.
			 */
			if (ntohs(outer_ip->ip_len) >=
			    sizeof(struct ip) +
			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				ch = (struct sctp_init_chunk *)(sh + 1);
				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = icmp->icmp_type;
		code = icmp->icmp_code;
		if ((type == ICMP_UNREACH) &&
		    (code == ICMP_UNREACH_PORT)) {
			/*
			 * A port unreachable for the encapsulating UDP port
			 * is treated like a protocol unreachable for SCTP.
			 */
			code = ICMP_UNREACH_PROTOCOL;
		}
		sctp_notify(inp, stcb, net, type, code,
		    ntohs(inner_ip->ip_len),
		    ntohs(icmp->icmp_nextmtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce ref-count taken by the lookup */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
	return;
}
7029 
7030 #endif
7031 
7032 #ifdef INET6
/*
 * ICMPv6 error handler for the IPv6 UDP tunneling socket.  'd' is a
 * struct ip6ctlparam describing the ICMPv6 message and the quoted mbuf.
 * Analogous to sctp_recv_icmp_tunneled_packet(): look up the association
 * with source and destination reversed (the quoted packet is one we
 * sent), validate the UDP ports and the SCTP verification tag so that
 * spoofed or stale ICMPv6 messages are ignored, then hand the error to
 * sctp6_notify().  Header fields are extracted with m_copydata() since
 * the quoted packet may span mbufs.
 */
static void
sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
{
	struct ip6ctlparam *ip6cp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctphdr sh;
	struct udphdr udp;
	struct sockaddr_in6 src, dst;
	uint8_t type, code;

	ip6cp = (struct ip6ctlparam *)d;
	/*
	 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
	 */
	if (ip6cp->ip6c_m == NULL) {
		return;
	}
	/*
	 * Check if we can safely examine the ports and the verification tag
	 * of the SCTP common header.
	 */
	if (ip6cp->ip6c_m->m_pkthdr.len <
	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
		return;
	}
	/* Copy out the UDP header. */
	memset(&udp, 0, sizeof(struct udphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off,
	    sizeof(struct udphdr),
	    (caddr_t)&udp);
	/* Copy out the port numbers and the verification tag. */
	memset(&sh, 0, sizeof(struct sctphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off + sizeof(struct udphdr),
	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
	    (caddr_t)&sh);
	memset(&src, 0, sizeof(struct sockaddr_in6));
	src.sin6_family = AF_INET6;
	src.sin6_len = sizeof(struct sockaddr_in6);
	src.sin6_port = sh.src_port;
	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	memset(&dst, 0, sizeof(struct sockaddr_in6));
	dst.sin6_family = AF_INET6;
	dst.sin6_len = sizeof(struct sockaddr_in6);
	dst.sin6_port = sh.dest_port;
	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	/*
	 * The quoted packet is one we sent, so reverse dst and src in the
	 * association lookup.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL) &&
	    (inp->sctp_socket != NULL)) {
		/* Check the UDP port numbers */
		if ((udp.uh_dport != net->port) ||
		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh.v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			/* Zero vtag: only an INIT may legitimately carry it. */
			if (ip6cp->ip6c_m->m_pkthdr.len >=
			    ip6cp->ip6c_off + sizeof(struct udphdr) +
			    sizeof(struct sctphdr) +
			    sizeof(struct sctp_chunkhdr) +
			    offsetof(struct sctp_init, a_rwnd)) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				uint32_t initiate_tag;
				uint8_t chunk_type;

				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr),
				    sizeof(uint8_t),
				    (caddr_t)&chunk_type);
				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr) +
				    sizeof(struct sctp_chunkhdr),
				    sizeof(uint32_t),
				    (caddr_t)&initiate_tag);
				if ((chunk_type != SCTP_INITIATION) ||
				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = ip6cp->ip6c_icmp6->icmp6_type;
		code = ip6cp->ip6c_icmp6->icmp6_code;
		if ((type == ICMP6_DST_UNREACH) &&
		    (code == ICMP6_DST_UNREACH_NOPORT)) {
			/*
			 * A port unreachable for the encapsulating UDP port
			 * is remapped to "next header unrecognized" for SCTP.
			 */
			type = ICMP6_PARAM_PROB;
			code = ICMP6_PARAMPROB_NEXTHEADER;
		}
		sctp6_notify(inp, stcb, net, type, code,
		    (uint16_t) ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce inp's ref-count taken by the lookup */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
}
7172 
7173 #endif
7174 
void
sctp_over_udp_stop(void)
{
	/*
	 * Tear down the UDP tunneling sockets, if any are open.  The
	 * sysctl caller is assumed to hold sctp_sysctl_info_lock() for
	 * writing, so the socket pointers cannot change underneath us.
	 * The per-family teardowns are independent of each other.
	 */
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
}
7195 
7196 int
7197 sctp_over_udp_start(void)
7198 {
7199 	uint16_t port;
7200 	int ret;
7201 
7202 #ifdef INET
7203 	struct sockaddr_in sin;
7204 
7205 #endif
7206 #ifdef INET6
7207 	struct sockaddr_in6 sin6;
7208 
7209 #endif
7210 	/*
7211 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7212 	 * for writting!
7213 	 */
7214 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7215 	if (ntohs(port) == 0) {
7216 		/* Must have a port set */
7217 		return (EINVAL);
7218 	}
7219 #ifdef INET
7220 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7221 		/* Already running -- must stop first */
7222 		return (EALREADY);
7223 	}
7224 #endif
7225 #ifdef INET6
7226 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7227 		/* Already running -- must stop first */
7228 		return (EALREADY);
7229 	}
7230 #endif
7231 #ifdef INET
7232 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7233 	    SOCK_DGRAM, IPPROTO_UDP,
7234 	    curthread->td_ucred, curthread))) {
7235 		sctp_over_udp_stop();
7236 		return (ret);
7237 	}
7238 	/* Call the special UDP hook. */
7239 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7240 	    sctp_recv_udp_tunneled_packet,
7241 	    sctp_recv_icmp_tunneled_packet,
7242 	    NULL))) {
7243 		sctp_over_udp_stop();
7244 		return (ret);
7245 	}
7246 	/* Ok, we have a socket, bind it to the port. */
7247 	memset(&sin, 0, sizeof(struct sockaddr_in));
7248 	sin.sin_len = sizeof(struct sockaddr_in);
7249 	sin.sin_family = AF_INET;
7250 	sin.sin_port = htons(port);
7251 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7252 	    (struct sockaddr *)&sin, curthread))) {
7253 		sctp_over_udp_stop();
7254 		return (ret);
7255 	}
7256 #endif
7257 #ifdef INET6
7258 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7259 	    SOCK_DGRAM, IPPROTO_UDP,
7260 	    curthread->td_ucred, curthread))) {
7261 		sctp_over_udp_stop();
7262 		return (ret);
7263 	}
7264 	/* Call the special UDP hook. */
7265 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7266 	    sctp_recv_udp_tunneled_packet,
7267 	    sctp_recv_icmp6_tunneled_packet,
7268 	    NULL))) {
7269 		sctp_over_udp_stop();
7270 		return (ret);
7271 	}
7272 	/* Ok, we have a socket, bind it to the port. */
7273 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7274 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7275 	sin6.sin6_family = AF_INET6;
7276 	sin6.sin6_port = htons(port);
7277 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7278 	    (struct sockaddr *)&sin6, curthread))) {
7279 		sctp_over_udp_stop();
7280 		return (ret);
7281 	}
7282 #endif
7283 	return (0);
7284 }
7285