xref: /freebsd/sys/netinet/sctputil.c (revision 094fc1ed0f2627525c7b0342efcbad5be7a8546a)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #if defined(INET6) || defined(INET)
53 #include <netinet/tcp_var.h>
54 #endif
55 #include <netinet/udp.h>
56 #include <netinet/udp_var.h>
57 #include <sys/proc.h>
58 #ifdef INET6
59 #include <netinet/icmp6.h>
60 #endif
61 
62 
63 #ifndef KTR_SCTP
64 #define KTR_SCTP KTR_SUBSYS
65 #endif
66 
67 extern const struct sctp_cc_functions sctp_cc_functions[];
68 extern const struct sctp_ss_functions sctp_ss_functions[];
69 
70 void
71 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
72 {
73 	struct sctp_cwnd_log sctp_clog;
74 
75 	sctp_clog.x.sb.stcb = stcb;
76 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
77 	if (stcb)
78 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
79 	else
80 		sctp_clog.x.sb.stcb_sbcc = 0;
81 	sctp_clog.x.sb.incr = incr;
82 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
83 	    SCTP_LOG_EVENT_SB,
84 	    from,
85 	    sctp_clog.x.misc.log1,
86 	    sctp_clog.x.misc.log2,
87 	    sctp_clog.x.misc.log3,
88 	    sctp_clog.x.misc.log4);
89 }
90 
91 void
92 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
93 {
94 	struct sctp_cwnd_log sctp_clog;
95 
96 	sctp_clog.x.close.inp = (void *)inp;
97 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
98 	if (stcb) {
99 		sctp_clog.x.close.stcb = (void *)stcb;
100 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
101 	} else {
102 		sctp_clog.x.close.stcb = 0;
103 		sctp_clog.x.close.state = 0;
104 	}
105 	sctp_clog.x.close.loc = loc;
106 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
107 	    SCTP_LOG_EVENT_CLOSE,
108 	    0,
109 	    sctp_clog.x.misc.log1,
110 	    sctp_clog.x.misc.log2,
111 	    sctp_clog.x.misc.log3,
112 	    sctp_clog.x.misc.log4);
113 }
114 
115 void
116 rto_logging(struct sctp_nets *net, int from)
117 {
118 	struct sctp_cwnd_log sctp_clog;
119 
120 	memset(&sctp_clog, 0, sizeof(sctp_clog));
121 	sctp_clog.x.rto.net = (void *)net;
122 	sctp_clog.x.rto.rtt = net->rtt / 1000;
123 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
124 	    SCTP_LOG_EVENT_RTT,
125 	    from,
126 	    sctp_clog.x.misc.log1,
127 	    sctp_clog.x.misc.log2,
128 	    sctp_clog.x.misc.log3,
129 	    sctp_clog.x.misc.log4);
130 }
131 
132 void
133 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
134 {
135 	struct sctp_cwnd_log sctp_clog;
136 
137 	sctp_clog.x.strlog.stcb = stcb;
138 	sctp_clog.x.strlog.n_tsn = tsn;
139 	sctp_clog.x.strlog.n_sseq = sseq;
140 	sctp_clog.x.strlog.e_tsn = 0;
141 	sctp_clog.x.strlog.e_sseq = 0;
142 	sctp_clog.x.strlog.strm = stream;
143 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
144 	    SCTP_LOG_EVENT_STRM,
145 	    from,
146 	    sctp_clog.x.misc.log1,
147 	    sctp_clog.x.misc.log2,
148 	    sctp_clog.x.misc.log3,
149 	    sctp_clog.x.misc.log4);
150 }
151 
152 void
153 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
154 {
155 	struct sctp_cwnd_log sctp_clog;
156 
157 	sctp_clog.x.nagle.stcb = (void *)stcb;
158 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
159 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
160 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
161 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
162 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
163 	    SCTP_LOG_EVENT_NAGLE,
164 	    action,
165 	    sctp_clog.x.misc.log1,
166 	    sctp_clog.x.misc.log2,
167 	    sctp_clog.x.misc.log3,
168 	    sctp_clog.x.misc.log4);
169 }
170 
171 void
172 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
173 {
174 	struct sctp_cwnd_log sctp_clog;
175 
176 	sctp_clog.x.sack.cumack = cumack;
177 	sctp_clog.x.sack.oldcumack = old_cumack;
178 	sctp_clog.x.sack.tsn = tsn;
179 	sctp_clog.x.sack.numGaps = gaps;
180 	sctp_clog.x.sack.numDups = dups;
181 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
182 	    SCTP_LOG_EVENT_SACK,
183 	    from,
184 	    sctp_clog.x.misc.log1,
185 	    sctp_clog.x.misc.log2,
186 	    sctp_clog.x.misc.log3,
187 	    sctp_clog.x.misc.log4);
188 }
189 
190 void
191 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
192 {
193 	struct sctp_cwnd_log sctp_clog;
194 
195 	memset(&sctp_clog, 0, sizeof(sctp_clog));
196 	sctp_clog.x.map.base = map;
197 	sctp_clog.x.map.cum = cum;
198 	sctp_clog.x.map.high = high;
199 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
200 	    SCTP_LOG_EVENT_MAP,
201 	    from,
202 	    sctp_clog.x.misc.log1,
203 	    sctp_clog.x.misc.log2,
204 	    sctp_clog.x.misc.log3,
205 	    sctp_clog.x.misc.log4);
206 }
207 
208 void
209 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
210 {
211 	struct sctp_cwnd_log sctp_clog;
212 
213 	memset(&sctp_clog, 0, sizeof(sctp_clog));
214 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
215 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
216 	sctp_clog.x.fr.tsn = tsn;
217 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
218 	    SCTP_LOG_EVENT_FR,
219 	    from,
220 	    sctp_clog.x.misc.log1,
221 	    sctp_clog.x.misc.log2,
222 	    sctp_clog.x.misc.log3,
223 	    sctp_clog.x.misc.log4);
224 }
225 
226 #ifdef SCTP_MBUF_LOGGING
227 void
228 sctp_log_mb(struct mbuf *m, int from)
229 {
230 	struct sctp_cwnd_log sctp_clog;
231 
232 	sctp_clog.x.mb.mp = m;
233 	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
234 	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
235 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
236 	if (SCTP_BUF_IS_EXTENDED(m)) {
237 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
238 		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
239 	} else {
240 		sctp_clog.x.mb.ext = 0;
241 		sctp_clog.x.mb.refcnt = 0;
242 	}
243 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
244 	    SCTP_LOG_EVENT_MBUF,
245 	    from,
246 	    sctp_clog.x.misc.log1,
247 	    sctp_clog.x.misc.log2,
248 	    sctp_clog.x.misc.log3,
249 	    sctp_clog.x.misc.log4);
250 }
251 
252 void
253 sctp_log_mbc(struct mbuf *m, int from)
254 {
255 	struct mbuf *mat;
256 
257 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
258 		sctp_log_mb(mat, from);
259 	}
260 }
261 #endif
262 
263 void
264 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
265 {
266 	struct sctp_cwnd_log sctp_clog;
267 
268 	if (control == NULL) {
269 		SCTP_PRINTF("Gak log of NULL?\n");
270 		return;
271 	}
272 	sctp_clog.x.strlog.stcb = control->stcb;
273 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
274 	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
275 	sctp_clog.x.strlog.strm = control->sinfo_stream;
276 	if (poschk != NULL) {
277 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
278 		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
279 	} else {
280 		sctp_clog.x.strlog.e_tsn = 0;
281 		sctp_clog.x.strlog.e_sseq = 0;
282 	}
283 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
284 	    SCTP_LOG_EVENT_STRM,
285 	    from,
286 	    sctp_clog.x.misc.log1,
287 	    sctp_clog.x.misc.log2,
288 	    sctp_clog.x.misc.log3,
289 	    sctp_clog.x.misc.log4);
290 }
291 
292 void
293 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
294 {
295 	struct sctp_cwnd_log sctp_clog;
296 
297 	sctp_clog.x.cwnd.net = net;
298 	if (stcb->asoc.send_queue_cnt > 255)
299 		sctp_clog.x.cwnd.cnt_in_send = 255;
300 	else
301 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
302 	if (stcb->asoc.stream_queue_cnt > 255)
303 		sctp_clog.x.cwnd.cnt_in_str = 255;
304 	else
305 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
306 
307 	if (net) {
308 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
309 		sctp_clog.x.cwnd.inflight = net->flight_size;
310 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
311 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
312 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
313 	}
314 	if (SCTP_CWNDLOG_PRESEND == from) {
315 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
316 	}
317 	sctp_clog.x.cwnd.cwnd_augment = augment;
318 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
319 	    SCTP_LOG_EVENT_CWND,
320 	    from,
321 	    sctp_clog.x.misc.log1,
322 	    sctp_clog.x.misc.log2,
323 	    sctp_clog.x.misc.log3,
324 	    sctp_clog.x.misc.log4);
325 }
326 
327 void
328 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
329 {
330 	struct sctp_cwnd_log sctp_clog;
331 
332 	memset(&sctp_clog, 0, sizeof(sctp_clog));
333 	if (inp) {
334 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
335 
336 	} else {
337 		sctp_clog.x.lock.sock = (void *)NULL;
338 	}
339 	sctp_clog.x.lock.inp = (void *)inp;
340 	if (stcb) {
341 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
342 	} else {
343 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
344 	}
345 	if (inp) {
346 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
347 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
348 	} else {
349 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
350 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
351 	}
352 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
353 	if (inp && (inp->sctp_socket)) {
354 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
355 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
356 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
357 	} else {
358 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
359 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
360 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
361 	}
362 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
363 	    SCTP_LOG_LOCK_EVENT,
364 	    from,
365 	    sctp_clog.x.misc.log1,
366 	    sctp_clog.x.misc.log2,
367 	    sctp_clog.x.misc.log3,
368 	    sctp_clog.x.misc.log4);
369 }
370 
371 void
372 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
373 {
374 	struct sctp_cwnd_log sctp_clog;
375 
376 	memset(&sctp_clog, 0, sizeof(sctp_clog));
377 	sctp_clog.x.cwnd.net = net;
378 	sctp_clog.x.cwnd.cwnd_new_value = error;
379 	sctp_clog.x.cwnd.inflight = net->flight_size;
380 	sctp_clog.x.cwnd.cwnd_augment = burst;
381 	if (stcb->asoc.send_queue_cnt > 255)
382 		sctp_clog.x.cwnd.cnt_in_send = 255;
383 	else
384 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
385 	if (stcb->asoc.stream_queue_cnt > 255)
386 		sctp_clog.x.cwnd.cnt_in_str = 255;
387 	else
388 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
389 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
390 	    SCTP_LOG_EVENT_MAXBURST,
391 	    from,
392 	    sctp_clog.x.misc.log1,
393 	    sctp_clog.x.misc.log2,
394 	    sctp_clog.x.misc.log3,
395 	    sctp_clog.x.misc.log4);
396 }
397 
398 void
399 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
400 {
401 	struct sctp_cwnd_log sctp_clog;
402 
403 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
404 	sctp_clog.x.rwnd.send_size = snd_size;
405 	sctp_clog.x.rwnd.overhead = overhead;
406 	sctp_clog.x.rwnd.new_rwnd = 0;
407 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
408 	    SCTP_LOG_EVENT_RWND,
409 	    from,
410 	    sctp_clog.x.misc.log1,
411 	    sctp_clog.x.misc.log2,
412 	    sctp_clog.x.misc.log3,
413 	    sctp_clog.x.misc.log4);
414 }
415 
416 void
417 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
418 {
419 	struct sctp_cwnd_log sctp_clog;
420 
421 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
422 	sctp_clog.x.rwnd.send_size = flight_size;
423 	sctp_clog.x.rwnd.overhead = overhead;
424 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
425 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
426 	    SCTP_LOG_EVENT_RWND,
427 	    from,
428 	    sctp_clog.x.misc.log1,
429 	    sctp_clog.x.misc.log2,
430 	    sctp_clog.x.misc.log3,
431 	    sctp_clog.x.misc.log4);
432 }
433 
434 #ifdef SCTP_MBCNT_LOGGING
435 static void
436 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
437 {
438 	struct sctp_cwnd_log sctp_clog;
439 
440 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
441 	sctp_clog.x.mbcnt.size_change = book;
442 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
443 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
444 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
445 	    SCTP_LOG_EVENT_MBCNT,
446 	    from,
447 	    sctp_clog.x.misc.log1,
448 	    sctp_clog.x.misc.log2,
449 	    sctp_clog.x.misc.log3,
450 	    sctp_clog.x.misc.log4);
451 }
452 #endif
453 
454 void
455 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
456 {
457 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
458 	    SCTP_LOG_MISC_EVENT,
459 	    from,
460 	    a, b, c, d);
461 }
462 
463 void
464 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
465 {
466 	struct sctp_cwnd_log sctp_clog;
467 
468 	sctp_clog.x.wake.stcb = (void *)stcb;
469 	sctp_clog.x.wake.wake_cnt = wake_cnt;
470 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
471 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
472 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
473 
474 	if (stcb->asoc.stream_queue_cnt < 0xff)
475 		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
476 	else
477 		sctp_clog.x.wake.stream_qcnt = 0xff;
478 
479 	if (stcb->asoc.chunks_on_out_queue < 0xff)
480 		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
481 	else
482 		sctp_clog.x.wake.chunks_on_oque = 0xff;
483 
484 	sctp_clog.x.wake.sctpflags = 0;
485 	/* set in the defered mode stuff */
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
487 		sctp_clog.x.wake.sctpflags |= 1;
488 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
489 		sctp_clog.x.wake.sctpflags |= 2;
490 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
491 		sctp_clog.x.wake.sctpflags |= 4;
492 	/* what about the sb */
493 	if (stcb->sctp_socket) {
494 		struct socket *so = stcb->sctp_socket;
495 
496 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
497 	} else {
498 		sctp_clog.x.wake.sbflags = 0xff;
499 	}
500 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
501 	    SCTP_LOG_EVENT_WAKE,
502 	    from,
503 	    sctp_clog.x.misc.log1,
504 	    sctp_clog.x.misc.log2,
505 	    sctp_clog.x.misc.log3,
506 	    sctp_clog.x.misc.log4);
507 }
508 
509 void
510 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
511 {
512 	struct sctp_cwnd_log sctp_clog;
513 
514 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
515 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
516 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
517 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
518 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
519 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
520 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
521 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
522 	    SCTP_LOG_EVENT_BLOCK,
523 	    from,
524 	    sctp_clog.x.misc.log1,
525 	    sctp_clog.x.misc.log2,
526 	    sctp_clog.x.misc.log3,
527 	    sctp_clog.x.misc.log4);
528 }
529 
530 int
531 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
532 {
533 	/* May need to fix this if ktrdump does not work */
534 	return (0);
535 }
536 
537 #ifdef SCTP_AUDITING_ENABLED
538 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
539 static int sctp_audit_indx = 0;
540 
541 static
542 void
543 sctp_print_audit_report(void)
544 {
545 	int i;
546 	int cnt;
547 
548 	cnt = 0;
549 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
550 		if ((sctp_audit_data[i][0] == 0xe0) &&
551 		    (sctp_audit_data[i][1] == 0x01)) {
552 			cnt = 0;
553 			SCTP_PRINTF("\n");
554 		} else if (sctp_audit_data[i][0] == 0xf0) {
555 			cnt = 0;
556 			SCTP_PRINTF("\n");
557 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
558 		    (sctp_audit_data[i][1] == 0x01)) {
559 			SCTP_PRINTF("\n");
560 			cnt = 0;
561 		}
562 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
563 		    (uint32_t)sctp_audit_data[i][1]);
564 		cnt++;
565 		if ((cnt % 14) == 0)
566 			SCTP_PRINTF("\n");
567 	}
568 	for (i = 0; i < sctp_audit_indx; i++) {
569 		if ((sctp_audit_data[i][0] == 0xe0) &&
570 		    (sctp_audit_data[i][1] == 0x01)) {
571 			cnt = 0;
572 			SCTP_PRINTF("\n");
573 		} else if (sctp_audit_data[i][0] == 0xf0) {
574 			cnt = 0;
575 			SCTP_PRINTF("\n");
576 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
577 		    (sctp_audit_data[i][1] == 0x01)) {
578 			SCTP_PRINTF("\n");
579 			cnt = 0;
580 		}
581 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
582 		    (uint32_t)sctp_audit_data[i][1]);
583 		cnt++;
584 		if ((cnt % 14) == 0)
585 			SCTP_PRINTF("\n");
586 	}
587 	SCTP_PRINTF("\n");
588 }
589 
590 void
591 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
592     struct sctp_nets *net)
593 {
594 	int resend_cnt, tot_out, rep, tot_book_cnt;
595 	struct sctp_nets *lnet;
596 	struct sctp_tmit_chunk *chk;
597 
598 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
599 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
600 	sctp_audit_indx++;
601 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
602 		sctp_audit_indx = 0;
603 	}
604 	if (inp == NULL) {
605 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
606 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
607 		sctp_audit_indx++;
608 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
609 			sctp_audit_indx = 0;
610 		}
611 		return;
612 	}
613 	if (stcb == NULL) {
614 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
615 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
616 		sctp_audit_indx++;
617 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
618 			sctp_audit_indx = 0;
619 		}
620 		return;
621 	}
622 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
623 	sctp_audit_data[sctp_audit_indx][1] =
624 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
625 	sctp_audit_indx++;
626 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
627 		sctp_audit_indx = 0;
628 	}
629 	rep = 0;
630 	tot_book_cnt = 0;
631 	resend_cnt = tot_out = 0;
632 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
633 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
634 			resend_cnt++;
635 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
636 			tot_out += chk->book_size;
637 			tot_book_cnt++;
638 		}
639 	}
640 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
641 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
642 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
643 		sctp_audit_indx++;
644 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
645 			sctp_audit_indx = 0;
646 		}
647 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
648 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
649 		rep = 1;
650 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
651 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
652 		sctp_audit_data[sctp_audit_indx][1] =
653 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
654 		sctp_audit_indx++;
655 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
656 			sctp_audit_indx = 0;
657 		}
658 	}
659 	if (tot_out != stcb->asoc.total_flight) {
660 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
661 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
662 		sctp_audit_indx++;
663 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
664 			sctp_audit_indx = 0;
665 		}
666 		rep = 1;
667 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
668 		    (int)stcb->asoc.total_flight);
669 		stcb->asoc.total_flight = tot_out;
670 	}
671 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
672 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
673 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
674 		sctp_audit_indx++;
675 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
676 			sctp_audit_indx = 0;
677 		}
678 		rep = 1;
679 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
680 
681 		stcb->asoc.total_flight_count = tot_book_cnt;
682 	}
683 	tot_out = 0;
684 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
685 		tot_out += lnet->flight_size;
686 	}
687 	if (tot_out != stcb->asoc.total_flight) {
688 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
689 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
690 		sctp_audit_indx++;
691 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
692 			sctp_audit_indx = 0;
693 		}
694 		rep = 1;
695 		SCTP_PRINTF("real flight:%d net total was %d\n",
696 		    stcb->asoc.total_flight, tot_out);
697 		/* now corrective action */
698 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
699 
700 			tot_out = 0;
701 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
702 				if ((chk->whoTo == lnet) &&
703 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
704 					tot_out += chk->book_size;
705 				}
706 			}
707 			if (lnet->flight_size != tot_out) {
708 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
709 				    (void *)lnet, lnet->flight_size,
710 				    tot_out);
711 				lnet->flight_size = tot_out;
712 			}
713 		}
714 	}
715 	if (rep) {
716 		sctp_print_audit_report();
717 	}
718 }
719 
720 void
721 sctp_audit_log(uint8_t ev, uint8_t fd)
722 {
723 
724 	sctp_audit_data[sctp_audit_indx][0] = ev;
725 	sctp_audit_data[sctp_audit_indx][1] = fd;
726 	sctp_audit_indx++;
727 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
728 		sctp_audit_indx = 0;
729 	}
730 }
731 
732 #endif
733 
734 /*
735  * sctp_stop_timers_for_shutdown() should be called
736  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
737  * state to make sure that all timers are stopped.
738  */
739 void
740 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
741 {
742 	struct sctp_association *asoc;
743 	struct sctp_nets *net;
744 
745 	asoc = &stcb->asoc;
746 
747 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
748 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
749 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
750 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
751 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
752 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
753 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
754 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
755 	}
756 }
757 
758 /*
759  * a list of sizes based on typical mtu's, used only if next hop size not
760  * returned.
761  */
762 static uint32_t sctp_mtu_sizes[] = {
763 	68,
764 	296,
765 	508,
766 	512,
767 	544,
768 	576,
769 	1006,
770 	1492,
771 	1500,
772 	1536,
773 	2002,
774 	2048,
775 	4352,
776 	4464,
777 	8166,
778 	17914,
779 	32000,
780 	65535
781 };
782 
783 /*
784  * Return the largest MTU smaller than val. If there is no
785  * entry, just return val.
786  */
787 uint32_t
788 sctp_get_prev_mtu(uint32_t val)
789 {
790 	uint32_t i;
791 
792 	if (val <= sctp_mtu_sizes[0]) {
793 		return (val);
794 	}
795 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
796 		if (val <= sctp_mtu_sizes[i]) {
797 			break;
798 		}
799 	}
800 	return (sctp_mtu_sizes[i - 1]);
801 }
802 
803 /*
804  * Return the smallest MTU larger than val. If there is no
805  * entry, just return val.
806  */
807 uint32_t
808 sctp_get_next_mtu(uint32_t val)
809 {
810 	/* select another MTU that is just bigger than this one */
811 	uint32_t i;
812 
813 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
814 		if (val < sctp_mtu_sizes[i]) {
815 			return (sctp_mtu_sizes[i]);
816 		}
817 	}
818 	return (val);
819 }
820 
821 void
822 sctp_fill_random_store(struct sctp_pcb *m)
823 {
824 	/*
825 	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
826 	 * our counter. The result becomes our good random numbers and we
827 	 * then setup to give these out. Note that we do no locking to
828 	 * protect this. This is ok, since if competing folks call this we
829 	 * will get more gobbled gook in the random store which is what we
830 	 * want. There is a danger that two guys will use the same random
831 	 * numbers, but thats ok too since that is random as well :->
832 	 */
833 	m->store_at = 0;
834 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
835 	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
836 	    sizeof(m->random_counter), (uint8_t *)m->random_store);
837 	m->random_counter++;
838 }
839 
840 uint32_t
841 sctp_select_initial_TSN(struct sctp_pcb *inp)
842 {
843 	/*
844 	 * A true implementation should use random selection process to get
845 	 * the initial stream sequence number, using RFC1750 as a good
846 	 * guideline
847 	 */
848 	uint32_t x, *xp;
849 	uint8_t *p;
850 	int store_at, new_store;
851 
852 	if (inp->initial_sequence_debug != 0) {
853 		uint32_t ret;
854 
855 		ret = inp->initial_sequence_debug;
856 		inp->initial_sequence_debug++;
857 		return (ret);
858 	}
859 retry:
860 	store_at = inp->store_at;
861 	new_store = store_at + sizeof(uint32_t);
862 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
863 		new_store = 0;
864 	}
865 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
866 		goto retry;
867 	}
868 	if (new_store == 0) {
869 		/* Refill the random store */
870 		sctp_fill_random_store(inp);
871 	}
872 	p = &inp->random_store[store_at];
873 	xp = (uint32_t *)p;
874 	x = *xp;
875 	return (x);
876 }
877 
878 uint32_t
879 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
880 {
881 	uint32_t x;
882 	struct timeval now;
883 
884 	if (check) {
885 		(void)SCTP_GETTIME_TIMEVAL(&now);
886 	}
887 	for (;;) {
888 		x = sctp_select_initial_TSN(&inp->sctp_ep);
889 		if (x == 0) {
890 			/* we never use 0 */
891 			continue;
892 		}
893 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
894 			break;
895 		}
896 	}
897 	return (x);
898 }
899 
900 int32_t
901 sctp_map_assoc_state(int kernel_state)
902 {
903 	int32_t user_state;
904 
905 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
906 		user_state = SCTP_CLOSED;
907 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
908 		user_state = SCTP_SHUTDOWN_PENDING;
909 	} else {
910 		switch (kernel_state & SCTP_STATE_MASK) {
911 		case SCTP_STATE_EMPTY:
912 			user_state = SCTP_CLOSED;
913 			break;
914 		case SCTP_STATE_INUSE:
915 			user_state = SCTP_CLOSED;
916 			break;
917 		case SCTP_STATE_COOKIE_WAIT:
918 			user_state = SCTP_COOKIE_WAIT;
919 			break;
920 		case SCTP_STATE_COOKIE_ECHOED:
921 			user_state = SCTP_COOKIE_ECHOED;
922 			break;
923 		case SCTP_STATE_OPEN:
924 			user_state = SCTP_ESTABLISHED;
925 			break;
926 		case SCTP_STATE_SHUTDOWN_SENT:
927 			user_state = SCTP_SHUTDOWN_SENT;
928 			break;
929 		case SCTP_STATE_SHUTDOWN_RECEIVED:
930 			user_state = SCTP_SHUTDOWN_RECEIVED;
931 			break;
932 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
933 			user_state = SCTP_SHUTDOWN_ACK_SENT;
934 			break;
935 		default:
936 			user_state = SCTP_CLOSED;
937 			break;
938 		}
939 	}
940 	return (user_state);
941 }
942 
943 int
944 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
945     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
946 {
947 	struct sctp_association *asoc;
948 
949 	/*
950 	 * Anything set to zero is taken care of by the allocation routine's
951 	 * bzero
952 	 */
953 
954 	/*
955 	 * Up front select what scoping to apply on addresses I tell my peer
956 	 * Not sure what to do with these right now, we will need to come up
957 	 * with a way to set them. We may need to pass them through from the
958 	 * caller in the sctp_aloc_assoc() function.
959 	 */
960 	int i;
961 #if defined(SCTP_DETAILED_STR_STATS)
962 	int j;
963 #endif
964 
965 	asoc = &stcb->asoc;
966 	/* init all variables to a known value. */
967 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
968 	asoc->max_burst = inp->sctp_ep.max_burst;
969 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
970 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
971 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
972 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
973 	asoc->ecn_supported = inp->ecn_supported;
974 	asoc->prsctp_supported = inp->prsctp_supported;
975 	asoc->idata_supported = inp->idata_supported;
976 	asoc->auth_supported = inp->auth_supported;
977 	asoc->asconf_supported = inp->asconf_supported;
978 	asoc->reconfig_supported = inp->reconfig_supported;
979 	asoc->nrsack_supported = inp->nrsack_supported;
980 	asoc->pktdrop_supported = inp->pktdrop_supported;
981 	asoc->idata_supported = inp->idata_supported;
982 	asoc->sctp_cmt_pf = (uint8_t)0;
983 	asoc->sctp_frag_point = inp->sctp_frag_point;
984 	asoc->sctp_features = inp->sctp_features;
985 	asoc->default_dscp = inp->sctp_ep.default_dscp;
986 	asoc->max_cwnd = inp->max_cwnd;
987 #ifdef INET6
988 	if (inp->sctp_ep.default_flowlabel) {
989 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
990 	} else {
991 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
992 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
993 			asoc->default_flowlabel &= 0x000fffff;
994 			asoc->default_flowlabel |= 0x80000000;
995 		} else {
996 			asoc->default_flowlabel = 0;
997 		}
998 	}
999 #endif
1000 	asoc->sb_send_resv = 0;
1001 	if (override_tag) {
1002 		asoc->my_vtag = override_tag;
1003 	} else {
1004 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1005 	}
1006 	/* Get the nonce tags */
1007 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1008 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1009 	asoc->vrf_id = vrf_id;
1010 
1011 #ifdef SCTP_ASOCLOG_OF_TSNS
1012 	asoc->tsn_in_at = 0;
1013 	asoc->tsn_out_at = 0;
1014 	asoc->tsn_in_wrapped = 0;
1015 	asoc->tsn_out_wrapped = 0;
1016 	asoc->cumack_log_at = 0;
1017 	asoc->cumack_log_atsnt = 0;
1018 #endif
1019 #ifdef SCTP_FS_SPEC_LOG
1020 	asoc->fs_index = 0;
1021 #endif
1022 	asoc->refcnt = 0;
1023 	asoc->assoc_up_sent = 0;
1024 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1025 	    sctp_select_initial_TSN(&inp->sctp_ep);
1026 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1027 	/* we are optimisitic here */
1028 	asoc->peer_supports_nat = 0;
1029 	asoc->sent_queue_retran_cnt = 0;
1030 
1031 	/* for CMT */
1032 	asoc->last_net_cmt_send_started = NULL;
1033 
1034 	/* This will need to be adjusted */
1035 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1036 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1037 	asoc->asconf_seq_in = asoc->last_acked_seq;
1038 
1039 	/* here we are different, we hold the next one we expect */
1040 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1041 
1042 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1043 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1044 
1045 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1046 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1047 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1048 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1049 	asoc->free_chunk_cnt = 0;
1050 
1051 	asoc->iam_blocking = 0;
1052 	asoc->context = inp->sctp_context;
1053 	asoc->local_strreset_support = inp->local_strreset_support;
1054 	asoc->def_send = inp->def_send;
1055 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1056 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1057 	asoc->pr_sctp_cnt = 0;
1058 	asoc->total_output_queue_size = 0;
1059 
1060 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1061 		asoc->scope.ipv6_addr_legal = 1;
1062 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1063 			asoc->scope.ipv4_addr_legal = 1;
1064 		} else {
1065 			asoc->scope.ipv4_addr_legal = 0;
1066 		}
1067 	} else {
1068 		asoc->scope.ipv6_addr_legal = 0;
1069 		asoc->scope.ipv4_addr_legal = 1;
1070 	}
1071 
1072 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1073 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1074 
1075 	asoc->smallest_mtu = inp->sctp_frag_point;
1076 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1077 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1078 
1079 	asoc->stream_locked_on = 0;
1080 	asoc->ecn_echo_cnt_onq = 0;
1081 	asoc->stream_locked = 0;
1082 
1083 	asoc->send_sack = 1;
1084 
1085 	LIST_INIT(&asoc->sctp_restricted_addrs);
1086 
1087 	TAILQ_INIT(&asoc->nets);
1088 	TAILQ_INIT(&asoc->pending_reply_queue);
1089 	TAILQ_INIT(&asoc->asconf_ack_sent);
1090 	/* Setup to fill the hb random cache at first HB */
1091 	asoc->hb_random_idx = 4;
1092 
1093 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1094 
1095 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1096 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1097 
1098 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1099 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1100 
1101 	/*
1102 	 * Now the stream parameters, here we allocate space for all streams
1103 	 * that we request by default.
1104 	 */
1105 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1106 	    o_strms;
1107 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1108 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1109 	    SCTP_M_STRMO);
1110 	if (asoc->strmout == NULL) {
1111 		/* big trouble no memory */
1112 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1113 		return (ENOMEM);
1114 	}
1115 	for (i = 0; i < asoc->streamoutcnt; i++) {
1116 		/*
1117 		 * inbound side must be set to 0xffff, also NOTE when we get
1118 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1119 		 * count (streamoutcnt) but first check if we sent to any of
1120 		 * the upper streams that were dropped (if some were). Those
1121 		 * that were dropped must be notified to the upper layer as
1122 		 * failed to send.
1123 		 */
1124 		asoc->strmout[i].next_mid_ordered = 0;
1125 		asoc->strmout[i].next_mid_unordered = 0;
1126 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1127 		asoc->strmout[i].chunks_on_queues = 0;
1128 #if defined(SCTP_DETAILED_STR_STATS)
1129 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1130 			asoc->strmout[i].abandoned_sent[j] = 0;
1131 			asoc->strmout[i].abandoned_unsent[j] = 0;
1132 		}
1133 #else
1134 		asoc->strmout[i].abandoned_sent[0] = 0;
1135 		asoc->strmout[i].abandoned_unsent[0] = 0;
1136 #endif
1137 		asoc->strmout[i].sid = i;
1138 		asoc->strmout[i].last_msg_incomplete = 0;
1139 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1140 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1141 	}
1142 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1143 
1144 	/* Now the mapping array */
1145 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1146 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1147 	    SCTP_M_MAP);
1148 	if (asoc->mapping_array == NULL) {
1149 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1150 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1151 		return (ENOMEM);
1152 	}
1153 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1154 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1155 	    SCTP_M_MAP);
1156 	if (asoc->nr_mapping_array == NULL) {
1157 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1158 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1159 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1160 		return (ENOMEM);
1161 	}
1162 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1163 
1164 	/* Now the init of the other outqueues */
1165 	TAILQ_INIT(&asoc->free_chunks);
1166 	TAILQ_INIT(&asoc->control_send_queue);
1167 	TAILQ_INIT(&asoc->asconf_send_queue);
1168 	TAILQ_INIT(&asoc->send_queue);
1169 	TAILQ_INIT(&asoc->sent_queue);
1170 	TAILQ_INIT(&asoc->resetHead);
1171 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1172 	TAILQ_INIT(&asoc->asconf_queue);
1173 	/* authentication fields */
1174 	asoc->authinfo.random = NULL;
1175 	asoc->authinfo.active_keyid = 0;
1176 	asoc->authinfo.assoc_key = NULL;
1177 	asoc->authinfo.assoc_keyid = 0;
1178 	asoc->authinfo.recv_key = NULL;
1179 	asoc->authinfo.recv_keyid = 0;
1180 	LIST_INIT(&asoc->shared_keys);
1181 	asoc->marked_retrans = 0;
1182 	asoc->port = inp->sctp_ep.port;
1183 	asoc->timoinit = 0;
1184 	asoc->timodata = 0;
1185 	asoc->timosack = 0;
1186 	asoc->timoshutdown = 0;
1187 	asoc->timoheartbeat = 0;
1188 	asoc->timocookie = 0;
1189 	asoc->timoshutdownack = 0;
1190 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1191 	asoc->discontinuity_time = asoc->start_time;
1192 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1193 		asoc->abandoned_unsent[i] = 0;
1194 		asoc->abandoned_sent[i] = 0;
1195 	}
1196 	/*
1197 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1198 	 * freed later when the association is freed.
1199 	 */
1200 	return (0);
1201 }
1202 
1203 void
1204 sctp_print_mapping_array(struct sctp_association *asoc)
1205 {
1206 	unsigned int i, limit;
1207 
1208 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1209 	    asoc->mapping_array_size,
1210 	    asoc->mapping_array_base_tsn,
1211 	    asoc->cumulative_tsn,
1212 	    asoc->highest_tsn_inside_map,
1213 	    asoc->highest_tsn_inside_nr_map);
1214 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1215 		if (asoc->mapping_array[limit - 1] != 0) {
1216 			break;
1217 		}
1218 	}
1219 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1220 	for (i = 0; i < limit; i++) {
1221 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1222 	}
1223 	if (limit % 16)
1224 		SCTP_PRINTF("\n");
1225 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1226 		if (asoc->nr_mapping_array[limit - 1]) {
1227 			break;
1228 		}
1229 	}
1230 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1231 	for (i = 0; i < limit; i++) {
1232 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1233 	}
1234 	if (limit % 16)
1235 		SCTP_PRINTF("\n");
1236 }
1237 
1238 int
1239 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1240 {
1241 	/* mapping array needs to grow */
1242 	uint8_t *new_array1, *new_array2;
1243 	uint32_t new_size;
1244 
1245 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1246 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1247 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1248 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1249 		/* can't get more, forget it */
1250 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1251 		if (new_array1) {
1252 			SCTP_FREE(new_array1, SCTP_M_MAP);
1253 		}
1254 		if (new_array2) {
1255 			SCTP_FREE(new_array2, SCTP_M_MAP);
1256 		}
1257 		return (-1);
1258 	}
1259 	memset(new_array1, 0, new_size);
1260 	memset(new_array2, 0, new_size);
1261 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1262 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1263 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1264 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1265 	asoc->mapping_array = new_array1;
1266 	asoc->nr_mapping_array = new_array2;
1267 	asoc->mapping_array_size = new_size;
1268 	return (0);
1269 }
1270 
1271 
1272 static void
1273 sctp_iterator_work(struct sctp_iterator *it)
1274 {
1275 	int iteration_count = 0;
1276 	int inp_skip = 0;
1277 	int first_in = 1;
1278 	struct sctp_inpcb *tinp;
1279 
1280 	SCTP_INP_INFO_RLOCK();
1281 	SCTP_ITERATOR_LOCK();
1282 	sctp_it_ctl.cur_it = it;
1283 	if (it->inp) {
1284 		SCTP_INP_RLOCK(it->inp);
1285 		SCTP_INP_DECR_REF(it->inp);
1286 	}
1287 	if (it->inp == NULL) {
1288 		/* iterator is complete */
1289 done_with_iterator:
1290 		sctp_it_ctl.cur_it = NULL;
1291 		SCTP_ITERATOR_UNLOCK();
1292 		SCTP_INP_INFO_RUNLOCK();
1293 		if (it->function_atend != NULL) {
1294 			(*it->function_atend) (it->pointer, it->val);
1295 		}
1296 		SCTP_FREE(it, SCTP_M_ITER);
1297 		return;
1298 	}
1299 select_a_new_ep:
1300 	if (first_in) {
1301 		first_in = 0;
1302 	} else {
1303 		SCTP_INP_RLOCK(it->inp);
1304 	}
1305 	while (((it->pcb_flags) &&
1306 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1307 	    ((it->pcb_features) &&
1308 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1309 		/* endpoint flags or features don't match, so keep looking */
1310 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1311 			SCTP_INP_RUNLOCK(it->inp);
1312 			goto done_with_iterator;
1313 		}
1314 		tinp = it->inp;
1315 		it->inp = LIST_NEXT(it->inp, sctp_list);
1316 		SCTP_INP_RUNLOCK(tinp);
1317 		if (it->inp == NULL) {
1318 			goto done_with_iterator;
1319 		}
1320 		SCTP_INP_RLOCK(it->inp);
1321 	}
1322 	/* now go through each assoc which is in the desired state */
1323 	if (it->done_current_ep == 0) {
1324 		if (it->function_inp != NULL)
1325 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1326 		it->done_current_ep = 1;
1327 	}
1328 	if (it->stcb == NULL) {
1329 		/* run the per instance function */
1330 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1331 	}
1332 	if ((inp_skip) || it->stcb == NULL) {
1333 		if (it->function_inp_end != NULL) {
1334 			inp_skip = (*it->function_inp_end) (it->inp,
1335 			    it->pointer,
1336 			    it->val);
1337 		}
1338 		SCTP_INP_RUNLOCK(it->inp);
1339 		goto no_stcb;
1340 	}
1341 	while (it->stcb) {
1342 		SCTP_TCB_LOCK(it->stcb);
1343 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1344 			/* not in the right state... keep looking */
1345 			SCTP_TCB_UNLOCK(it->stcb);
1346 			goto next_assoc;
1347 		}
1348 		/* see if we have limited out the iterator loop */
1349 		iteration_count++;
1350 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1351 			/* Pause to let others grab the lock */
1352 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1353 			SCTP_TCB_UNLOCK(it->stcb);
1354 			SCTP_INP_INCR_REF(it->inp);
1355 			SCTP_INP_RUNLOCK(it->inp);
1356 			SCTP_ITERATOR_UNLOCK();
1357 			SCTP_INP_INFO_RUNLOCK();
1358 			SCTP_INP_INFO_RLOCK();
1359 			SCTP_ITERATOR_LOCK();
1360 			if (sctp_it_ctl.iterator_flags) {
1361 				/* We won't be staying here */
1362 				SCTP_INP_DECR_REF(it->inp);
1363 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1364 				if (sctp_it_ctl.iterator_flags &
1365 				    SCTP_ITERATOR_STOP_CUR_IT) {
1366 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1367 					goto done_with_iterator;
1368 				}
1369 				if (sctp_it_ctl.iterator_flags &
1370 				    SCTP_ITERATOR_STOP_CUR_INP) {
1371 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1372 					goto no_stcb;
1373 				}
1374 				/* If we reach here huh? */
1375 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1376 				    sctp_it_ctl.iterator_flags);
1377 				sctp_it_ctl.iterator_flags = 0;
1378 			}
1379 			SCTP_INP_RLOCK(it->inp);
1380 			SCTP_INP_DECR_REF(it->inp);
1381 			SCTP_TCB_LOCK(it->stcb);
1382 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1383 			iteration_count = 0;
1384 		}
1385 		/* run function on this one */
1386 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1387 
1388 		/*
1389 		 * we lie here, it really needs to have its own type but
1390 		 * first I must verify that this won't effect things :-0
1391 		 */
1392 		if (it->no_chunk_output == 0)
1393 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1394 
1395 		SCTP_TCB_UNLOCK(it->stcb);
1396 next_assoc:
1397 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1398 		if (it->stcb == NULL) {
1399 			/* Run last function */
1400 			if (it->function_inp_end != NULL) {
1401 				inp_skip = (*it->function_inp_end) (it->inp,
1402 				    it->pointer,
1403 				    it->val);
1404 			}
1405 		}
1406 	}
1407 	SCTP_INP_RUNLOCK(it->inp);
1408 no_stcb:
1409 	/* done with all assocs on this endpoint, move on to next endpoint */
1410 	it->done_current_ep = 0;
1411 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1412 		it->inp = NULL;
1413 	} else {
1414 		it->inp = LIST_NEXT(it->inp, sctp_list);
1415 	}
1416 	if (it->inp == NULL) {
1417 		goto done_with_iterator;
1418 	}
1419 	goto select_a_new_ep;
1420 }
1421 
1422 void
1423 sctp_iterator_worker(void)
1424 {
1425 	struct sctp_iterator *it, *nit;
1426 
1427 	/* This function is called with the WQ lock in place */
1428 
1429 	sctp_it_ctl.iterator_running = 1;
1430 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1431 		/* now lets work on this one */
1432 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1433 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1434 		CURVNET_SET(it->vn);
1435 		sctp_iterator_work(it);
1436 		CURVNET_RESTORE();
1437 		SCTP_IPI_ITERATOR_WQ_LOCK();
1438 		/* sa_ignore FREED_MEMORY */
1439 	}
1440 	sctp_it_ctl.iterator_running = 0;
1441 	return;
1442 }
1443 
1444 
1445 static void
1446 sctp_handle_addr_wq(void)
1447 {
1448 	/* deal with the ADDR wq from the rtsock calls */
1449 	struct sctp_laddr *wi, *nwi;
1450 	struct sctp_asconf_iterator *asc;
1451 
1452 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1453 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1454 	if (asc == NULL) {
1455 		/* Try later, no memory */
1456 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1457 		    (struct sctp_inpcb *)NULL,
1458 		    (struct sctp_tcb *)NULL,
1459 		    (struct sctp_nets *)NULL);
1460 		return;
1461 	}
1462 	LIST_INIT(&asc->list_of_work);
1463 	asc->cnt = 0;
1464 
1465 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1466 		LIST_REMOVE(wi, sctp_nxt_addr);
1467 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1468 		asc->cnt++;
1469 	}
1470 
1471 	if (asc->cnt == 0) {
1472 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1473 	} else {
1474 		int ret;
1475 
1476 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1477 		    sctp_asconf_iterator_stcb,
1478 		    NULL,	/* No ep end for boundall */
1479 		    SCTP_PCB_FLAGS_BOUNDALL,
1480 		    SCTP_PCB_ANY_FEATURES,
1481 		    SCTP_ASOC_ANY_STATE,
1482 		    (void *)asc, 0,
1483 		    sctp_asconf_iterator_end, NULL, 0);
1484 		if (ret) {
1485 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1486 			/*
1487 			 * Freeing if we are stopping or put back on the
1488 			 * addr_wq.
1489 			 */
1490 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1491 				sctp_asconf_iterator_end(asc, 0);
1492 			} else {
1493 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1494 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1495 				}
1496 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1497 			}
1498 		}
1499 	}
1500 }
1501 
1502 void
1503 sctp_timeout_handler(void *t)
1504 {
1505 	struct sctp_inpcb *inp;
1506 	struct sctp_tcb *stcb;
1507 	struct sctp_nets *net;
1508 	struct sctp_timer *tmr;
1509 	struct mbuf *op_err;
1510 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1511 	struct socket *so;
1512 #endif
1513 	int did_output;
1514 	int type;
1515 
1516 	tmr = (struct sctp_timer *)t;
1517 	inp = (struct sctp_inpcb *)tmr->ep;
1518 	stcb = (struct sctp_tcb *)tmr->tcb;
1519 	net = (struct sctp_nets *)tmr->net;
1520 	CURVNET_SET((struct vnet *)tmr->vnet);
1521 	did_output = 1;
1522 
1523 #ifdef SCTP_AUDITING_ENABLED
1524 	sctp_audit_log(0xF0, (uint8_t)tmr->type);
1525 	sctp_auditing(3, inp, stcb, net);
1526 #endif
1527 
1528 	/* sanity checks... */
1529 	if (tmr->self != (void *)tmr) {
1530 		/*
1531 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1532 		 * (void *)tmr);
1533 		 */
1534 		CURVNET_RESTORE();
1535 		return;
1536 	}
1537 	tmr->stopped_from = 0xa001;
1538 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1539 		/*
1540 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1541 		 * tmr->type);
1542 		 */
1543 		CURVNET_RESTORE();
1544 		return;
1545 	}
1546 	tmr->stopped_from = 0xa002;
1547 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1548 		CURVNET_RESTORE();
1549 		return;
1550 	}
1551 	/* if this is an iterator timeout, get the struct and clear inp */
1552 	tmr->stopped_from = 0xa003;
1553 	if (inp) {
1554 		SCTP_INP_INCR_REF(inp);
1555 		if ((inp->sctp_socket == NULL) &&
1556 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1557 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1558 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1559 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1560 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1561 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1562 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1563 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1564 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) {
1565 			SCTP_INP_DECR_REF(inp);
1566 			CURVNET_RESTORE();
1567 			return;
1568 		}
1569 	}
1570 	tmr->stopped_from = 0xa004;
1571 	if (stcb) {
1572 		atomic_add_int(&stcb->asoc.refcnt, 1);
1573 		if (stcb->asoc.state == 0) {
1574 			atomic_add_int(&stcb->asoc.refcnt, -1);
1575 			if (inp) {
1576 				SCTP_INP_DECR_REF(inp);
1577 			}
1578 			CURVNET_RESTORE();
1579 			return;
1580 		}
1581 	}
1582 	type = tmr->type;
1583 	tmr->stopped_from = 0xa005;
1584 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1585 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1586 		if (inp) {
1587 			SCTP_INP_DECR_REF(inp);
1588 		}
1589 		if (stcb) {
1590 			atomic_add_int(&stcb->asoc.refcnt, -1);
1591 		}
1592 		CURVNET_RESTORE();
1593 		return;
1594 	}
1595 	tmr->stopped_from = 0xa006;
1596 
1597 	if (stcb) {
1598 		SCTP_TCB_LOCK(stcb);
1599 		atomic_add_int(&stcb->asoc.refcnt, -1);
1600 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1601 		    ((stcb->asoc.state == 0) ||
1602 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1603 			SCTP_TCB_UNLOCK(stcb);
1604 			if (inp) {
1605 				SCTP_INP_DECR_REF(inp);
1606 			}
1607 			CURVNET_RESTORE();
1608 			return;
1609 		}
1610 	} else if (inp != NULL) {
1611 		if (type != SCTP_TIMER_TYPE_INPKILL) {
1612 			SCTP_INP_WLOCK(inp);
1613 		}
1614 	} else {
1615 		SCTP_WQ_ADDR_LOCK();
1616 	}
1617 	/* record in stopped what t-o occurred */
1618 	tmr->stopped_from = type;
1619 
1620 	/* mark as being serviced now */
1621 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1622 		/*
1623 		 * Callout has been rescheduled.
1624 		 */
1625 		goto get_out;
1626 	}
1627 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1628 		/*
1629 		 * Not active, so no action.
1630 		 */
1631 		goto get_out;
1632 	}
1633 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1634 
1635 	/* call the handler for the appropriate timer type */
1636 	switch (type) {
1637 	case SCTP_TIMER_TYPE_ADDR_WQ:
1638 		sctp_handle_addr_wq();
1639 		break;
1640 	case SCTP_TIMER_TYPE_SEND:
1641 		if ((stcb == NULL) || (inp == NULL)) {
1642 			break;
1643 		}
1644 		SCTP_STAT_INCR(sctps_timodata);
1645 		stcb->asoc.timodata++;
1646 		stcb->asoc.num_send_timers_up--;
1647 		if (stcb->asoc.num_send_timers_up < 0) {
1648 			stcb->asoc.num_send_timers_up = 0;
1649 		}
1650 		SCTP_TCB_LOCK_ASSERT(stcb);
1651 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1652 			/* no need to unlock on tcb its gone */
1653 
1654 			goto out_decr;
1655 		}
1656 		SCTP_TCB_LOCK_ASSERT(stcb);
1657 #ifdef SCTP_AUDITING_ENABLED
1658 		sctp_auditing(4, inp, stcb, net);
1659 #endif
1660 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1661 		if ((stcb->asoc.num_send_timers_up == 0) &&
1662 		    (stcb->asoc.sent_queue_cnt > 0)) {
1663 			struct sctp_tmit_chunk *chk;
1664 
1665 			/*
1666 			 * safeguard. If there on some on the sent queue
1667 			 * somewhere but no timers running something is
1668 			 * wrong... so we start a timer on the first chunk
1669 			 * on the send queue on whatever net it is sent to.
1670 			 */
1671 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1672 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1673 			    chk->whoTo);
1674 		}
1675 		break;
1676 	case SCTP_TIMER_TYPE_INIT:
1677 		if ((stcb == NULL) || (inp == NULL)) {
1678 			break;
1679 		}
1680 		SCTP_STAT_INCR(sctps_timoinit);
1681 		stcb->asoc.timoinit++;
1682 		if (sctp_t1init_timer(inp, stcb, net)) {
1683 			/* no need to unlock on tcb its gone */
1684 			goto out_decr;
1685 		}
1686 		/* We do output but not here */
1687 		did_output = 0;
1688 		break;
1689 	case SCTP_TIMER_TYPE_RECV:
1690 		if ((stcb == NULL) || (inp == NULL)) {
1691 			break;
1692 		}
1693 		SCTP_STAT_INCR(sctps_timosack);
1694 		stcb->asoc.timosack++;
1695 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1696 #ifdef SCTP_AUDITING_ENABLED
1697 		sctp_auditing(4, inp, stcb, net);
1698 #endif
1699 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1700 		break;
1701 	case SCTP_TIMER_TYPE_SHUTDOWN:
1702 		if ((stcb == NULL) || (inp == NULL)) {
1703 			break;
1704 		}
1705 		if (sctp_shutdown_timer(inp, stcb, net)) {
1706 			/* no need to unlock on tcb its gone */
1707 			goto out_decr;
1708 		}
1709 		SCTP_STAT_INCR(sctps_timoshutdown);
1710 		stcb->asoc.timoshutdown++;
1711 #ifdef SCTP_AUDITING_ENABLED
1712 		sctp_auditing(4, inp, stcb, net);
1713 #endif
1714 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1715 		break;
1716 	case SCTP_TIMER_TYPE_HEARTBEAT:
1717 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1718 			break;
1719 		}
1720 		SCTP_STAT_INCR(sctps_timoheartbeat);
1721 		stcb->asoc.timoheartbeat++;
1722 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1723 			/* no need to unlock on tcb its gone */
1724 			goto out_decr;
1725 		}
1726 #ifdef SCTP_AUDITING_ENABLED
1727 		sctp_auditing(4, inp, stcb, net);
1728 #endif
1729 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1730 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1731 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1732 		}
1733 		break;
1734 	case SCTP_TIMER_TYPE_COOKIE:
1735 		if ((stcb == NULL) || (inp == NULL)) {
1736 			break;
1737 		}
1738 		if (sctp_cookie_timer(inp, stcb, net)) {
1739 			/* no need to unlock on tcb its gone */
1740 			goto out_decr;
1741 		}
1742 		SCTP_STAT_INCR(sctps_timocookie);
1743 		stcb->asoc.timocookie++;
1744 #ifdef SCTP_AUDITING_ENABLED
1745 		sctp_auditing(4, inp, stcb, net);
1746 #endif
1747 		/*
1748 		 * We consider T3 and Cookie timer pretty much the same with
1749 		 * respect to where from in chunk_output.
1750 		 */
1751 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1752 		break;
1753 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1754 		{
1755 			struct timeval tv;
1756 			int i, secret;
1757 
1758 			if (inp == NULL) {
1759 				break;
1760 			}
1761 			SCTP_STAT_INCR(sctps_timosecret);
1762 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1763 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1764 			inp->sctp_ep.last_secret_number =
1765 			    inp->sctp_ep.current_secret_number;
1766 			inp->sctp_ep.current_secret_number++;
1767 			if (inp->sctp_ep.current_secret_number >=
1768 			    SCTP_HOW_MANY_SECRETS) {
1769 				inp->sctp_ep.current_secret_number = 0;
1770 			}
1771 			secret = (int)inp->sctp_ep.current_secret_number;
1772 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1773 				inp->sctp_ep.secret_key[secret][i] =
1774 				    sctp_select_initial_TSN(&inp->sctp_ep);
1775 			}
1776 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1777 		}
1778 		did_output = 0;
1779 		break;
1780 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1781 		if ((stcb == NULL) || (inp == NULL)) {
1782 			break;
1783 		}
1784 		SCTP_STAT_INCR(sctps_timopathmtu);
1785 		sctp_pathmtu_timer(inp, stcb, net);
1786 		did_output = 0;
1787 		break;
1788 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1789 		if ((stcb == NULL) || (inp == NULL)) {
1790 			break;
1791 		}
1792 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1793 			/* no need to unlock on tcb its gone */
1794 			goto out_decr;
1795 		}
1796 		SCTP_STAT_INCR(sctps_timoshutdownack);
1797 		stcb->asoc.timoshutdownack++;
1798 #ifdef SCTP_AUDITING_ENABLED
1799 		sctp_auditing(4, inp, stcb, net);
1800 #endif
1801 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1802 		break;
1803 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1804 		if ((stcb == NULL) || (inp == NULL)) {
1805 			break;
1806 		}
1807 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1808 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1809 		    "Shutdown guard timer expired");
1810 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1811 		/* no need to unlock on tcb its gone */
1812 		goto out_decr;
1813 
1814 	case SCTP_TIMER_TYPE_STRRESET:
1815 		if ((stcb == NULL) || (inp == NULL)) {
1816 			break;
1817 		}
1818 		if (sctp_strreset_timer(inp, stcb, net)) {
1819 			/* no need to unlock on tcb its gone */
1820 			goto out_decr;
1821 		}
1822 		SCTP_STAT_INCR(sctps_timostrmrst);
1823 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1824 		break;
1825 	case SCTP_TIMER_TYPE_ASCONF:
1826 		if ((stcb == NULL) || (inp == NULL)) {
1827 			break;
1828 		}
1829 		if (sctp_asconf_timer(inp, stcb, net)) {
1830 			/* no need to unlock on tcb its gone */
1831 			goto out_decr;
1832 		}
1833 		SCTP_STAT_INCR(sctps_timoasconf);
1834 #ifdef SCTP_AUDITING_ENABLED
1835 		sctp_auditing(4, inp, stcb, net);
1836 #endif
1837 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1838 		break;
1839 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1840 		if ((stcb == NULL) || (inp == NULL)) {
1841 			break;
1842 		}
1843 		sctp_delete_prim_timer(inp, stcb, net);
1844 		SCTP_STAT_INCR(sctps_timodelprim);
1845 		break;
1846 
1847 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1848 		if ((stcb == NULL) || (inp == NULL)) {
1849 			break;
1850 		}
1851 		SCTP_STAT_INCR(sctps_timoautoclose);
1852 		sctp_autoclose_timer(inp, stcb, net);
1853 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1854 		did_output = 0;
1855 		break;
1856 	case SCTP_TIMER_TYPE_ASOCKILL:
1857 		if ((stcb == NULL) || (inp == NULL)) {
1858 			break;
1859 		}
1860 		SCTP_STAT_INCR(sctps_timoassockill);
1861 		/* Can we free it yet? */
1862 		SCTP_INP_DECR_REF(inp);
1863 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1864 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1865 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1866 		so = SCTP_INP_SO(inp);
1867 		atomic_add_int(&stcb->asoc.refcnt, 1);
1868 		SCTP_TCB_UNLOCK(stcb);
1869 		SCTP_SOCKET_LOCK(so, 1);
1870 		SCTP_TCB_LOCK(stcb);
1871 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1872 #endif
1873 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1874 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1875 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1876 		SCTP_SOCKET_UNLOCK(so, 1);
1877 #endif
1878 		/*
1879 		 * free asoc, always unlocks (or destroy's) so prevent
1880 		 * duplicate unlock or unlock of a free mtx :-0
1881 		 */
1882 		stcb = NULL;
1883 		goto out_no_decr;
1884 	case SCTP_TIMER_TYPE_INPKILL:
1885 		SCTP_STAT_INCR(sctps_timoinpkill);
1886 		if (inp == NULL) {
1887 			break;
1888 		}
1889 		/*
1890 		 * special case, take away our increment since WE are the
1891 		 * killer
1892 		 */
1893 		SCTP_INP_DECR_REF(inp);
1894 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1895 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1896 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1897 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1898 		inp = NULL;
1899 		goto out_no_decr;
1900 	default:
1901 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1902 		    type);
1903 		break;
1904 	}
1905 #ifdef SCTP_AUDITING_ENABLED
1906 	sctp_audit_log(0xF1, (uint8_t)type);
1907 	if (inp)
1908 		sctp_auditing(5, inp, stcb, net);
1909 #endif
1910 	if ((did_output) && stcb) {
1911 		/*
1912 		 * Now we need to clean up the control chunk chain if an
1913 		 * ECNE is on it. It must be marked as UNSENT again so next
1914 		 * call will continue to send it until such time that we get
1915 		 * a CWR, to remove it. It is, however, less likely that we
1916 		 * will find a ecn echo on the chain though.
1917 		 */
1918 		sctp_fix_ecn_echo(&stcb->asoc);
1919 	}
1920 get_out:
1921 	if (stcb) {
1922 		SCTP_TCB_UNLOCK(stcb);
1923 	} else if (inp != NULL) {
1924 		SCTP_INP_WUNLOCK(inp);
1925 	} else {
1926 		SCTP_WQ_ADDR_UNLOCK();
1927 	}
1928 
1929 out_decr:
1930 	if (inp) {
1931 		SCTP_INP_DECR_REF(inp);
1932 	}
1933 out_no_decr:
1934 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
1935 	CURVNET_RESTORE();
1936 }
1937 
1938 void
1939 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1940     struct sctp_nets *net)
1941 {
1942 	uint32_t to_ticks;
1943 	struct sctp_timer *tmr;
1944 
1945 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1946 		return;
1947 
1948 	tmr = NULL;
1949 	if (stcb) {
1950 		SCTP_TCB_LOCK_ASSERT(stcb);
1951 	}
1952 	switch (t_type) {
1953 	case SCTP_TIMER_TYPE_ADDR_WQ:
1954 		/* Only 1 tick away :-) */
1955 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1956 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1957 		break;
1958 	case SCTP_TIMER_TYPE_SEND:
1959 		/* Here we use the RTO timer */
1960 		{
1961 			int rto_val;
1962 
1963 			if ((stcb == NULL) || (net == NULL)) {
1964 				return;
1965 			}
1966 			tmr = &net->rxt_timer;
1967 			if (net->RTO == 0) {
1968 				rto_val = stcb->asoc.initial_rto;
1969 			} else {
1970 				rto_val = net->RTO;
1971 			}
1972 			to_ticks = MSEC_TO_TICKS(rto_val);
1973 		}
1974 		break;
1975 	case SCTP_TIMER_TYPE_INIT:
1976 		/*
1977 		 * Here we use the INIT timer default usually about 1
1978 		 * minute.
1979 		 */
1980 		if ((stcb == NULL) || (net == NULL)) {
1981 			return;
1982 		}
1983 		tmr = &net->rxt_timer;
1984 		if (net->RTO == 0) {
1985 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1986 		} else {
1987 			to_ticks = MSEC_TO_TICKS(net->RTO);
1988 		}
1989 		break;
1990 	case SCTP_TIMER_TYPE_RECV:
1991 		/*
1992 		 * Here we use the Delayed-Ack timer value from the inp
1993 		 * ususually about 200ms.
1994 		 */
1995 		if (stcb == NULL) {
1996 			return;
1997 		}
1998 		tmr = &stcb->asoc.dack_timer;
1999 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2000 		break;
2001 	case SCTP_TIMER_TYPE_SHUTDOWN:
2002 		/* Here we use the RTO of the destination. */
2003 		if ((stcb == NULL) || (net == NULL)) {
2004 			return;
2005 		}
2006 		if (net->RTO == 0) {
2007 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2008 		} else {
2009 			to_ticks = MSEC_TO_TICKS(net->RTO);
2010 		}
2011 		tmr = &net->rxt_timer;
2012 		break;
2013 	case SCTP_TIMER_TYPE_HEARTBEAT:
2014 		/*
2015 		 * the net is used here so that we can add in the RTO. Even
2016 		 * though we use a different timer. We also add the HB timer
2017 		 * PLUS a random jitter.
2018 		 */
2019 		if ((stcb == NULL) || (net == NULL)) {
2020 			return;
2021 		} else {
2022 			uint32_t rndval;
2023 			uint32_t jitter;
2024 
2025 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2026 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2027 				return;
2028 			}
2029 			if (net->RTO == 0) {
2030 				to_ticks = stcb->asoc.initial_rto;
2031 			} else {
2032 				to_ticks = net->RTO;
2033 			}
2034 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2035 			jitter = rndval % to_ticks;
2036 			if (jitter >= (to_ticks >> 1)) {
2037 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2038 			} else {
2039 				to_ticks = to_ticks - jitter;
2040 			}
2041 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2042 			    !(net->dest_state & SCTP_ADDR_PF)) {
2043 				to_ticks += net->heart_beat_delay;
2044 			}
2045 			/*
2046 			 * Now we must convert the to_ticks that are now in
2047 			 * ms to ticks.
2048 			 */
2049 			to_ticks = MSEC_TO_TICKS(to_ticks);
2050 			tmr = &net->hb_timer;
2051 		}
2052 		break;
2053 	case SCTP_TIMER_TYPE_COOKIE:
2054 		/*
2055 		 * Here we can use the RTO timer from the network since one
2056 		 * RTT was compelete. If a retran happened then we will be
2057 		 * using the RTO initial value.
2058 		 */
2059 		if ((stcb == NULL) || (net == NULL)) {
2060 			return;
2061 		}
2062 		if (net->RTO == 0) {
2063 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2064 		} else {
2065 			to_ticks = MSEC_TO_TICKS(net->RTO);
2066 		}
2067 		tmr = &net->rxt_timer;
2068 		break;
2069 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2070 		/*
2071 		 * nothing needed but the endpoint here ususually about 60
2072 		 * minutes.
2073 		 */
2074 		tmr = &inp->sctp_ep.signature_change;
2075 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2076 		break;
2077 	case SCTP_TIMER_TYPE_ASOCKILL:
2078 		if (stcb == NULL) {
2079 			return;
2080 		}
2081 		tmr = &stcb->asoc.strreset_timer;
2082 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2083 		break;
2084 	case SCTP_TIMER_TYPE_INPKILL:
2085 		/*
2086 		 * The inp is setup to die. We re-use the signature_chage
2087 		 * timer since that has stopped and we are in the GONE
2088 		 * state.
2089 		 */
2090 		tmr = &inp->sctp_ep.signature_change;
2091 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2092 		break;
2093 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2094 		/*
2095 		 * Here we use the value found in the EP for PMTU ususually
2096 		 * about 10 minutes.
2097 		 */
2098 		if ((stcb == NULL) || (net == NULL)) {
2099 			return;
2100 		}
2101 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2102 			return;
2103 		}
2104 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2105 		tmr = &net->pmtu_timer;
2106 		break;
2107 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2108 		/* Here we use the RTO of the destination */
2109 		if ((stcb == NULL) || (net == NULL)) {
2110 			return;
2111 		}
2112 		if (net->RTO == 0) {
2113 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2114 		} else {
2115 			to_ticks = MSEC_TO_TICKS(net->RTO);
2116 		}
2117 		tmr = &net->rxt_timer;
2118 		break;
2119 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2120 		/*
2121 		 * Here we use the endpoints shutdown guard timer usually
2122 		 * about 3 minutes.
2123 		 */
2124 		if (stcb == NULL) {
2125 			return;
2126 		}
2127 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2128 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2129 		} else {
2130 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2131 		}
2132 		tmr = &stcb->asoc.shut_guard_timer;
2133 		break;
2134 	case SCTP_TIMER_TYPE_STRRESET:
2135 		/*
2136 		 * Here the timer comes from the stcb but its value is from
2137 		 * the net's RTO.
2138 		 */
2139 		if ((stcb == NULL) || (net == NULL)) {
2140 			return;
2141 		}
2142 		if (net->RTO == 0) {
2143 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2144 		} else {
2145 			to_ticks = MSEC_TO_TICKS(net->RTO);
2146 		}
2147 		tmr = &stcb->asoc.strreset_timer;
2148 		break;
2149 	case SCTP_TIMER_TYPE_ASCONF:
2150 		/*
2151 		 * Here the timer comes from the stcb but its value is from
2152 		 * the net's RTO.
2153 		 */
2154 		if ((stcb == NULL) || (net == NULL)) {
2155 			return;
2156 		}
2157 		if (net->RTO == 0) {
2158 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2159 		} else {
2160 			to_ticks = MSEC_TO_TICKS(net->RTO);
2161 		}
2162 		tmr = &stcb->asoc.asconf_timer;
2163 		break;
2164 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2165 		if ((stcb == NULL) || (net != NULL)) {
2166 			return;
2167 		}
2168 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2169 		tmr = &stcb->asoc.delete_prim_timer;
2170 		break;
2171 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2172 		if (stcb == NULL) {
2173 			return;
2174 		}
2175 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2176 			/*
2177 			 * Really an error since stcb is NOT set to
2178 			 * autoclose
2179 			 */
2180 			return;
2181 		}
2182 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2183 		tmr = &stcb->asoc.autoclose_timer;
2184 		break;
2185 	default:
2186 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2187 		    __func__, t_type);
2188 		return;
2189 		break;
2190 	}
2191 	if ((to_ticks <= 0) || (tmr == NULL)) {
2192 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2193 		    __func__, t_type, to_ticks, (void *)tmr);
2194 		return;
2195 	}
2196 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2197 		/*
2198 		 * we do NOT allow you to have it already running. if it is
2199 		 * we leave the current one up unchanged
2200 		 */
2201 		return;
2202 	}
2203 	/* At this point we can proceed */
2204 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2205 		stcb->asoc.num_send_timers_up++;
2206 	}
2207 	tmr->stopped_from = 0;
2208 	tmr->type = t_type;
2209 	tmr->ep = (void *)inp;
2210 	tmr->tcb = (void *)stcb;
2211 	tmr->net = (void *)net;
2212 	tmr->self = (void *)tmr;
2213 	tmr->vnet = (void *)curvnet;
2214 	tmr->ticks = sctp_get_tick_count();
2215 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2216 	return;
2217 }
2218 
2219 void
2220 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2221     struct sctp_nets *net, uint32_t from)
2222 {
2223 	struct sctp_timer *tmr;
2224 
2225 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2226 	    (inp == NULL))
2227 		return;
2228 
2229 	tmr = NULL;
2230 	if (stcb) {
2231 		SCTP_TCB_LOCK_ASSERT(stcb);
2232 	}
2233 	switch (t_type) {
2234 	case SCTP_TIMER_TYPE_ADDR_WQ:
2235 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2236 		break;
2237 	case SCTP_TIMER_TYPE_SEND:
2238 		if ((stcb == NULL) || (net == NULL)) {
2239 			return;
2240 		}
2241 		tmr = &net->rxt_timer;
2242 		break;
2243 	case SCTP_TIMER_TYPE_INIT:
2244 		if ((stcb == NULL) || (net == NULL)) {
2245 			return;
2246 		}
2247 		tmr = &net->rxt_timer;
2248 		break;
2249 	case SCTP_TIMER_TYPE_RECV:
2250 		if (stcb == NULL) {
2251 			return;
2252 		}
2253 		tmr = &stcb->asoc.dack_timer;
2254 		break;
2255 	case SCTP_TIMER_TYPE_SHUTDOWN:
2256 		if ((stcb == NULL) || (net == NULL)) {
2257 			return;
2258 		}
2259 		tmr = &net->rxt_timer;
2260 		break;
2261 	case SCTP_TIMER_TYPE_HEARTBEAT:
2262 		if ((stcb == NULL) || (net == NULL)) {
2263 			return;
2264 		}
2265 		tmr = &net->hb_timer;
2266 		break;
2267 	case SCTP_TIMER_TYPE_COOKIE:
2268 		if ((stcb == NULL) || (net == NULL)) {
2269 			return;
2270 		}
2271 		tmr = &net->rxt_timer;
2272 		break;
2273 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2274 		/* nothing needed but the endpoint here */
2275 		tmr = &inp->sctp_ep.signature_change;
2276 		/*
2277 		 * We re-use the newcookie timer for the INP kill timer. We
2278 		 * must assure that we do not kill it by accident.
2279 		 */
2280 		break;
2281 	case SCTP_TIMER_TYPE_ASOCKILL:
2282 		/*
2283 		 * Stop the asoc kill timer.
2284 		 */
2285 		if (stcb == NULL) {
2286 			return;
2287 		}
2288 		tmr = &stcb->asoc.strreset_timer;
2289 		break;
2290 
2291 	case SCTP_TIMER_TYPE_INPKILL:
2292 		/*
2293 		 * The inp is setup to die. We re-use the signature_chage
2294 		 * timer since that has stopped and we are in the GONE
2295 		 * state.
2296 		 */
2297 		tmr = &inp->sctp_ep.signature_change;
2298 		break;
2299 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2300 		if ((stcb == NULL) || (net == NULL)) {
2301 			return;
2302 		}
2303 		tmr = &net->pmtu_timer;
2304 		break;
2305 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2306 		if ((stcb == NULL) || (net == NULL)) {
2307 			return;
2308 		}
2309 		tmr = &net->rxt_timer;
2310 		break;
2311 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2312 		if (stcb == NULL) {
2313 			return;
2314 		}
2315 		tmr = &stcb->asoc.shut_guard_timer;
2316 		break;
2317 	case SCTP_TIMER_TYPE_STRRESET:
2318 		if (stcb == NULL) {
2319 			return;
2320 		}
2321 		tmr = &stcb->asoc.strreset_timer;
2322 		break;
2323 	case SCTP_TIMER_TYPE_ASCONF:
2324 		if (stcb == NULL) {
2325 			return;
2326 		}
2327 		tmr = &stcb->asoc.asconf_timer;
2328 		break;
2329 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2330 		if (stcb == NULL) {
2331 			return;
2332 		}
2333 		tmr = &stcb->asoc.delete_prim_timer;
2334 		break;
2335 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2336 		if (stcb == NULL) {
2337 			return;
2338 		}
2339 		tmr = &stcb->asoc.autoclose_timer;
2340 		break;
2341 	default:
2342 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2343 		    __func__, t_type);
2344 		break;
2345 	}
2346 	if (tmr == NULL) {
2347 		return;
2348 	}
2349 	if ((tmr->type != t_type) && tmr->type) {
2350 		/*
2351 		 * Ok we have a timer that is under joint use. Cookie timer
2352 		 * per chance with the SEND timer. We therefore are NOT
2353 		 * running the timer that the caller wants stopped.  So just
2354 		 * return.
2355 		 */
2356 		return;
2357 	}
2358 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2359 		stcb->asoc.num_send_timers_up--;
2360 		if (stcb->asoc.num_send_timers_up < 0) {
2361 			stcb->asoc.num_send_timers_up = 0;
2362 		}
2363 	}
2364 	tmr->self = NULL;
2365 	tmr->stopped_from = from;
2366 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2367 	return;
2368 }
2369 
2370 uint32_t
2371 sctp_calculate_len(struct mbuf *m)
2372 {
2373 	uint32_t tlen = 0;
2374 	struct mbuf *at;
2375 
2376 	at = m;
2377 	while (at) {
2378 		tlen += SCTP_BUF_LEN(at);
2379 		at = SCTP_BUF_NEXT(at);
2380 	}
2381 	return (tlen);
2382 }
2383 
2384 void
2385 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2386     struct sctp_association *asoc, uint32_t mtu)
2387 {
2388 	/*
2389 	 * Reset the P-MTU size on this association, this involves changing
2390 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2391 	 * allow the DF flag to be cleared.
2392 	 */
2393 	struct sctp_tmit_chunk *chk;
2394 	unsigned int eff_mtu, ovh;
2395 
2396 	asoc->smallest_mtu = mtu;
2397 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2398 		ovh = SCTP_MIN_OVERHEAD;
2399 	} else {
2400 		ovh = SCTP_MIN_V4_OVERHEAD;
2401 	}
2402 	eff_mtu = mtu - ovh;
2403 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2404 		if (chk->send_size > eff_mtu) {
2405 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2406 		}
2407 	}
2408 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2409 		if (chk->send_size > eff_mtu) {
2410 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2411 		}
2412 	}
2413 }
2414 
2415 
2416 /*
2417  * given an association and starting time of the current RTT period return
2418  * RTO in number of msecs net should point to the current network
2419  */
2420 
2421 uint32_t
2422 sctp_calculate_rto(struct sctp_tcb *stcb,
2423     struct sctp_association *asoc,
2424     struct sctp_nets *net,
2425     struct timeval *told,
2426     int safe, int rtt_from_sack)
2427 {
2428 	/*-
2429 	 * given an association and the starting time of the current RTT
2430 	 * period (in value1/value2) return RTO in number of msecs.
2431 	 */
2432 	int32_t rtt;		/* RTT in ms */
2433 	uint32_t new_rto;
2434 	int first_measure = 0;
2435 	struct timeval now, then, *old;
2436 
2437 	/* Copy it out for sparc64 */
2438 	if (safe == sctp_align_unsafe_makecopy) {
2439 		old = &then;
2440 		memcpy(&then, told, sizeof(struct timeval));
2441 	} else if (safe == sctp_align_safe_nocopy) {
2442 		old = told;
2443 	} else {
2444 		/* error */
2445 		SCTP_PRINTF("Huh, bad rto calc call\n");
2446 		return (0);
2447 	}
2448 	/************************/
2449 	/* 1. calculate new RTT */
2450 	/************************/
2451 	/* get the current time */
2452 	if (stcb->asoc.use_precise_time) {
2453 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2454 	} else {
2455 		(void)SCTP_GETTIME_TIMEVAL(&now);
2456 	}
2457 	timevalsub(&now, old);
2458 	/* store the current RTT in us */
2459 	net->rtt = (uint64_t)1000000 *(uint64_t)now.tv_sec +
2460 	        (uint64_t)now.tv_usec;
2461 
2462 	/* compute rtt in ms */
2463 	rtt = (int32_t)(net->rtt / 1000);
2464 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2465 		/*
2466 		 * Tell the CC module that a new update has just occurred
2467 		 * from a sack
2468 		 */
2469 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2470 	}
2471 	/*
2472 	 * Do we need to determine the lan? We do this only on sacks i.e.
2473 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2474 	 */
2475 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2476 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2477 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2478 			net->lan_type = SCTP_LAN_INTERNET;
2479 		} else {
2480 			net->lan_type = SCTP_LAN_LOCAL;
2481 		}
2482 	}
2483 	/***************************/
2484 	/* 2. update RTTVAR & SRTT */
2485 	/***************************/
2486 	/*-
2487 	 * Compute the scaled average lastsa and the
2488 	 * scaled variance lastsv as described in van Jacobson
2489 	 * Paper "Congestion Avoidance and Control", Annex A.
2490 	 *
2491 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2492 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2493 	 */
2494 	if (net->RTO_measured) {
2495 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2496 		net->lastsa += rtt;
2497 		if (rtt < 0) {
2498 			rtt = -rtt;
2499 		}
2500 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2501 		net->lastsv += rtt;
2502 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2503 			rto_logging(net, SCTP_LOG_RTTVAR);
2504 		}
2505 	} else {
2506 		/* First RTO measurment */
2507 		net->RTO_measured = 1;
2508 		first_measure = 1;
2509 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2510 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2511 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2512 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2513 		}
2514 	}
2515 	if (net->lastsv == 0) {
2516 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2517 	}
2518 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2519 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2520 	    (stcb->asoc.sat_network_lockout == 0)) {
2521 		stcb->asoc.sat_network = 1;
2522 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2523 		stcb->asoc.sat_network = 0;
2524 		stcb->asoc.sat_network_lockout = 1;
2525 	}
2526 	/* bound it, per C6/C7 in Section 5.3.1 */
2527 	if (new_rto < stcb->asoc.minrto) {
2528 		new_rto = stcb->asoc.minrto;
2529 	}
2530 	if (new_rto > stcb->asoc.maxrto) {
2531 		new_rto = stcb->asoc.maxrto;
2532 	}
2533 	/* we are now returning the RTO */
2534 	return (new_rto);
2535 }
2536 
2537 /*
2538  * return a pointer to a contiguous piece of data from the given mbuf chain
2539  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2540  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2541  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2542  */
2543 caddr_t
2544 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2545 {
2546 	uint32_t count;
2547 	uint8_t *ptr;
2548 
2549 	ptr = in_ptr;
2550 	if ((off < 0) || (len <= 0))
2551 		return (NULL);
2552 
2553 	/* find the desired start location */
2554 	while ((m != NULL) && (off > 0)) {
2555 		if (off < SCTP_BUF_LEN(m))
2556 			break;
2557 		off -= SCTP_BUF_LEN(m);
2558 		m = SCTP_BUF_NEXT(m);
2559 	}
2560 	if (m == NULL)
2561 		return (NULL);
2562 
2563 	/* is the current mbuf large enough (eg. contiguous)? */
2564 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2565 		return (mtod(m, caddr_t)+off);
2566 	} else {
2567 		/* else, it spans more than one mbuf, so save a temp copy... */
2568 		while ((m != NULL) && (len > 0)) {
2569 			count = min(SCTP_BUF_LEN(m) - off, len);
2570 			memcpy(ptr, mtod(m, caddr_t)+off, count);
2571 			len -= count;
2572 			ptr += count;
2573 			off = 0;
2574 			m = SCTP_BUF_NEXT(m);
2575 		}
2576 		if ((m == NULL) && (len > 0))
2577 			return (NULL);
2578 		else
2579 			return ((caddr_t)in_ptr);
2580 	}
2581 }
2582 
2583 
2584 
2585 struct sctp_paramhdr *
2586 sctp_get_next_param(struct mbuf *m,
2587     int offset,
2588     struct sctp_paramhdr *pull,
2589     int pull_limit)
2590 {
2591 	/* This just provides a typed signature to Peter's Pull routine */
2592 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2593 	    (uint8_t *)pull));
2594 }
2595 
2596 
2597 struct mbuf *
2598 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2599 {
2600 	struct mbuf *m_last;
2601 	caddr_t dp;
2602 
2603 	if (padlen > 3) {
2604 		return (NULL);
2605 	}
2606 	if (padlen <= M_TRAILINGSPACE(m)) {
2607 		/*
2608 		 * The easy way. We hope the majority of the time we hit
2609 		 * here :)
2610 		 */
2611 		m_last = m;
2612 	} else {
2613 		/* Hard way we must grow the mbuf chain */
2614 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2615 		if (m_last == NULL) {
2616 			return (NULL);
2617 		}
2618 		SCTP_BUF_LEN(m_last) = 0;
2619 		SCTP_BUF_NEXT(m_last) = NULL;
2620 		SCTP_BUF_NEXT(m) = m_last;
2621 	}
2622 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2623 	SCTP_BUF_LEN(m_last) += padlen;
2624 	memset(dp, 0, padlen);
2625 	return (m_last);
2626 }
2627 
2628 struct mbuf *
2629 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2630 {
2631 	/* find the last mbuf in chain and pad it */
2632 	struct mbuf *m_at;
2633 
2634 	if (last_mbuf != NULL) {
2635 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2636 	} else {
2637 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2638 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2639 				return (sctp_add_pad_tombuf(m_at, padval));
2640 			}
2641 		}
2642 	}
2643 	return (NULL);
2644 }
2645 
2646 static void
2647 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2648     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2649 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2650     SCTP_UNUSED
2651 #endif
2652 )
2653 {
2654 	struct mbuf *m_notify;
2655 	struct sctp_assoc_change *sac;
2656 	struct sctp_queued_to_read *control;
2657 	unsigned int notif_len;
2658 	uint16_t abort_len;
2659 	unsigned int i;
2660 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2661 	struct socket *so;
2662 #endif
2663 
2664 	if (stcb == NULL) {
2665 		return;
2666 	}
2667 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2668 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2669 		if (abort != NULL) {
2670 			abort_len = ntohs(abort->ch.chunk_length);
2671 		} else {
2672 			abort_len = 0;
2673 		}
2674 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2675 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2676 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2677 			notif_len += abort_len;
2678 		}
2679 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2680 		if (m_notify == NULL) {
2681 			/* Retry with smaller value. */
2682 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2683 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2684 			if (m_notify == NULL) {
2685 				goto set_error;
2686 			}
2687 		}
2688 		SCTP_BUF_NEXT(m_notify) = NULL;
2689 		sac = mtod(m_notify, struct sctp_assoc_change *);
2690 		memset(sac, 0, notif_len);
2691 		sac->sac_type = SCTP_ASSOC_CHANGE;
2692 		sac->sac_flags = 0;
2693 		sac->sac_length = sizeof(struct sctp_assoc_change);
2694 		sac->sac_state = state;
2695 		sac->sac_error = error;
2696 		/* XXX verify these stream counts */
2697 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2698 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2699 		sac->sac_assoc_id = sctp_get_associd(stcb);
2700 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2701 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2702 				i = 0;
2703 				if (stcb->asoc.prsctp_supported == 1) {
2704 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2705 				}
2706 				if (stcb->asoc.auth_supported == 1) {
2707 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2708 				}
2709 				if (stcb->asoc.asconf_supported == 1) {
2710 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2711 				}
2712 				if (stcb->asoc.idata_supported == 1) {
2713 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2714 				}
2715 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2716 				if (stcb->asoc.reconfig_supported == 1) {
2717 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2718 				}
2719 				sac->sac_length += i;
2720 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2721 				memcpy(sac->sac_info, abort, abort_len);
2722 				sac->sac_length += abort_len;
2723 			}
2724 		}
2725 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2726 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2727 		    0, 0, stcb->asoc.context, 0, 0, 0,
2728 		    m_notify);
2729 		if (control != NULL) {
2730 			control->length = SCTP_BUF_LEN(m_notify);
2731 			control->spec_flags = M_NOTIFICATION;
2732 			/* not that we need this */
2733 			control->tail_mbuf = m_notify;
2734 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2735 			    control,
2736 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2737 			    so_locked);
2738 		} else {
2739 			sctp_m_freem(m_notify);
2740 		}
2741 	}
2742 	/*
2743 	 * For 1-to-1 style sockets, we send up and error when an ABORT
2744 	 * comes in.
2745 	 */
2746 set_error:
2747 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2748 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2749 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2750 		SOCK_LOCK(stcb->sctp_socket);
2751 		if (from_peer) {
2752 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2753 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2754 				stcb->sctp_socket->so_error = ECONNREFUSED;
2755 			} else {
2756 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2757 				stcb->sctp_socket->so_error = ECONNRESET;
2758 			}
2759 		} else {
2760 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2761 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2762 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2763 				stcb->sctp_socket->so_error = ETIMEDOUT;
2764 			} else {
2765 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2766 				stcb->sctp_socket->so_error = ECONNABORTED;
2767 			}
2768 		}
2769 		SOCK_UNLOCK(stcb->sctp_socket);
2770 	}
2771 	/* Wake ANY sleepers */
2772 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2773 	so = SCTP_INP_SO(stcb->sctp_ep);
2774 	if (!so_locked) {
2775 		atomic_add_int(&stcb->asoc.refcnt, 1);
2776 		SCTP_TCB_UNLOCK(stcb);
2777 		SCTP_SOCKET_LOCK(so, 1);
2778 		SCTP_TCB_LOCK(stcb);
2779 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2780 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2781 			SCTP_SOCKET_UNLOCK(so, 1);
2782 			return;
2783 		}
2784 	}
2785 #endif
2786 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2787 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2788 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2789 		socantrcvmore(stcb->sctp_socket);
2790 	}
2791 	sorwakeup(stcb->sctp_socket);
2792 	sowwakeup(stcb->sctp_socket);
2793 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2794 	if (!so_locked) {
2795 		SCTP_SOCKET_UNLOCK(so, 1);
2796 	}
2797 #endif
2798 }
2799 
2800 static void
2801 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2802     struct sockaddr *sa, uint32_t error, int so_locked
2803 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2804     SCTP_UNUSED
2805 #endif
2806 )
2807 {
2808 	struct mbuf *m_notify;
2809 	struct sctp_paddr_change *spc;
2810 	struct sctp_queued_to_read *control;
2811 
2812 	if ((stcb == NULL) ||
2813 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2814 		/* event not enabled */
2815 		return;
2816 	}
2817 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2818 	if (m_notify == NULL)
2819 		return;
2820 	SCTP_BUF_LEN(m_notify) = 0;
2821 	spc = mtod(m_notify, struct sctp_paddr_change *);
2822 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2823 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2824 	spc->spc_flags = 0;
2825 	spc->spc_length = sizeof(struct sctp_paddr_change);
2826 	switch (sa->sa_family) {
2827 #ifdef INET
2828 	case AF_INET:
2829 #ifdef INET6
2830 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2831 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2832 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2833 		} else {
2834 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2835 		}
2836 #else
2837 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2838 #endif
2839 		break;
2840 #endif
2841 #ifdef INET6
2842 	case AF_INET6:
2843 		{
2844 			struct sockaddr_in6 *sin6;
2845 
2846 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2847 
2848 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2849 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2850 				if (sin6->sin6_scope_id == 0) {
2851 					/* recover scope_id for user */
2852 					(void)sa6_recoverscope(sin6);
2853 				} else {
2854 					/* clear embedded scope_id for user */
2855 					in6_clearscope(&sin6->sin6_addr);
2856 				}
2857 			}
2858 			break;
2859 		}
2860 #endif
2861 	default:
2862 		/* TSNH */
2863 		break;
2864 	}
2865 	spc->spc_state = state;
2866 	spc->spc_error = error;
2867 	spc->spc_assoc_id = sctp_get_associd(stcb);
2868 
2869 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2870 	SCTP_BUF_NEXT(m_notify) = NULL;
2871 
2872 	/* append to socket */
2873 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2874 	    0, 0, stcb->asoc.context, 0, 0, 0,
2875 	    m_notify);
2876 	if (control == NULL) {
2877 		/* no memory */
2878 		sctp_m_freem(m_notify);
2879 		return;
2880 	}
2881 	control->length = SCTP_BUF_LEN(m_notify);
2882 	control->spec_flags = M_NOTIFICATION;
2883 	/* not that we need this */
2884 	control->tail_mbuf = m_notify;
2885 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2886 	    control,
2887 	    &stcb->sctp_socket->so_rcv, 1,
2888 	    SCTP_READ_LOCK_NOT_HELD,
2889 	    so_locked);
2890 }
2891 
2892 
2893 static void
2894 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2895     struct sctp_tmit_chunk *chk, int so_locked
2896 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2897     SCTP_UNUSED
2898 #endif
2899 )
2900 {
2901 	struct mbuf *m_notify;
2902 	struct sctp_send_failed *ssf;
2903 	struct sctp_send_failed_event *ssfe;
2904 	struct sctp_queued_to_read *control;
2905 	struct sctp_chunkhdr *chkhdr;
2906 	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
2907 
2908 	if ((stcb == NULL) ||
2909 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2910 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2911 		/* event not enabled */
2912 		return;
2913 	}
2914 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2915 		notifhdr_len = sizeof(struct sctp_send_failed_event);
2916 	} else {
2917 		notifhdr_len = sizeof(struct sctp_send_failed);
2918 	}
2919 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
2920 	if (m_notify == NULL)
2921 		/* no space left */
2922 		return;
2923 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
2924 	if (stcb->asoc.idata_supported) {
2925 		chkhdr_len = sizeof(struct sctp_idata_chunk);
2926 	} else {
2927 		chkhdr_len = sizeof(struct sctp_data_chunk);
2928 	}
2929 	/* Use some defaults in case we can't access the chunk header */
2930 	if (chk->send_size >= chkhdr_len) {
2931 		payload_len = chk->send_size - chkhdr_len;
2932 	} else {
2933 		payload_len = 0;
2934 	}
2935 	padding_len = 0;
2936 	if (chk->data != NULL) {
2937 		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
2938 		if (chkhdr != NULL) {
2939 			chk_len = ntohs(chkhdr->chunk_length);
2940 			if ((chk_len >= chkhdr_len) &&
2941 			    (chk->send_size >= chk_len) &&
2942 			    (chk->send_size - chk_len < 4)) {
2943 				padding_len = chk->send_size - chk_len;
2944 				payload_len = chk->send_size - chkhdr_len - padding_len;
2945 			}
2946 		}
2947 	}
2948 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2949 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2950 		memset(ssfe, 0, notifhdr_len);
2951 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2952 		if (sent) {
2953 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2954 		} else {
2955 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2956 		}
2957 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
2958 		ssfe->ssfe_error = error;
2959 		/* not exactly what the user sent in, but should be close :) */
2960 		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
2961 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2962 		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
2963 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2964 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2965 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2966 	} else {
2967 		ssf = mtod(m_notify, struct sctp_send_failed *);
2968 		memset(ssf, 0, notifhdr_len);
2969 		ssf->ssf_type = SCTP_SEND_FAILED;
2970 		if (sent) {
2971 			ssf->ssf_flags = SCTP_DATA_SENT;
2972 		} else {
2973 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2974 		}
2975 		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
2976 		ssf->ssf_error = error;
2977 		/* not exactly what the user sent in, but should be close :) */
2978 		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
2979 		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
2980 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2981 		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
2982 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
2983 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2984 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
2985 	}
2986 	if (chk->data != NULL) {
2987 		/* Trim off the sctp chunk header (it should be there) */
2988 		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
2989 			m_adj(chk->data, chkhdr_len);
2990 			m_adj(chk->data, -padding_len);
2991 			sctp_mbuf_crush(chk->data);
2992 			chk->send_size -= (chkhdr_len + padding_len);
2993 		}
2994 	}
2995 	SCTP_BUF_NEXT(m_notify) = chk->data;
2996 	/* Steal off the mbuf */
2997 	chk->data = NULL;
2998 	/*
2999 	 * For this case, we check the actual socket buffer, since the assoc
3000 	 * is going away we don't want to overfill the socket buffer for a
3001 	 * non-reader
3002 	 */
3003 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3004 		sctp_m_freem(m_notify);
3005 		return;
3006 	}
3007 	/* append to socket */
3008 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3009 	    0, 0, stcb->asoc.context, 0, 0, 0,
3010 	    m_notify);
3011 	if (control == NULL) {
3012 		/* no memory */
3013 		sctp_m_freem(m_notify);
3014 		return;
3015 	}
3016 	control->length = SCTP_BUF_LEN(m_notify);
3017 	control->spec_flags = M_NOTIFICATION;
3018 	/* not that we need this */
3019 	control->tail_mbuf = m_notify;
3020 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3021 	    control,
3022 	    &stcb->sctp_socket->so_rcv, 1,
3023 	    SCTP_READ_LOCK_NOT_HELD,
3024 	    so_locked);
3025 }
3026 
3027 
3028 static void
3029 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3030     struct sctp_stream_queue_pending *sp, int so_locked
3031 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3032     SCTP_UNUSED
3033 #endif
3034 )
3035 {
3036 	struct mbuf *m_notify;
3037 	struct sctp_send_failed *ssf;
3038 	struct sctp_send_failed_event *ssfe;
3039 	struct sctp_queued_to_read *control;
3040 	int notifhdr_len;
3041 
3042 	if ((stcb == NULL) ||
3043 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3044 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3045 		/* event not enabled */
3046 		return;
3047 	}
3048 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3049 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3050 	} else {
3051 		notifhdr_len = sizeof(struct sctp_send_failed);
3052 	}
3053 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3054 	if (m_notify == NULL) {
3055 		/* no space left */
3056 		return;
3057 	}
3058 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3059 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3060 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3061 		memset(ssfe, 0, notifhdr_len);
3062 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3063 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3064 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3065 		ssfe->ssfe_error = error;
3066 		/* not exactly what the user sent in, but should be close :) */
3067 		ssfe->ssfe_info.snd_sid = sp->sid;
3068 		if (sp->some_taken) {
3069 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3070 		} else {
3071 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3072 		}
3073 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3074 		ssfe->ssfe_info.snd_context = sp->context;
3075 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3076 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3077 	} else {
3078 		ssf = mtod(m_notify, struct sctp_send_failed *);
3079 		memset(ssf, 0, notifhdr_len);
3080 		ssf->ssf_type = SCTP_SEND_FAILED;
3081 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3082 		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3083 		ssf->ssf_error = error;
3084 		/* not exactly what the user sent in, but should be close :) */
3085 		ssf->ssf_info.sinfo_stream = sp->sid;
3086 		ssf->ssf_info.sinfo_ssn = 0;
3087 		if (sp->some_taken) {
3088 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3089 		} else {
3090 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3091 		}
3092 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3093 		ssf->ssf_info.sinfo_context = sp->context;
3094 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3095 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3096 	}
3097 	SCTP_BUF_NEXT(m_notify) = sp->data;
3098 
3099 	/* Steal off the mbuf */
3100 	sp->data = NULL;
3101 	/*
3102 	 * For this case, we check the actual socket buffer, since the assoc
3103 	 * is going away we don't want to overfill the socket buffer for a
3104 	 * non-reader
3105 	 */
3106 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3107 		sctp_m_freem(m_notify);
3108 		return;
3109 	}
3110 	/* append to socket */
3111 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3112 	    0, 0, stcb->asoc.context, 0, 0, 0,
3113 	    m_notify);
3114 	if (control == NULL) {
3115 		/* no memory */
3116 		sctp_m_freem(m_notify);
3117 		return;
3118 	}
3119 	control->length = SCTP_BUF_LEN(m_notify);
3120 	control->spec_flags = M_NOTIFICATION;
3121 	/* not that we need this */
3122 	control->tail_mbuf = m_notify;
3123 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3124 	    control,
3125 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3126 }
3127 
3128 
3129 
3130 static void
3131 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3132 {
3133 	struct mbuf *m_notify;
3134 	struct sctp_adaptation_event *sai;
3135 	struct sctp_queued_to_read *control;
3136 
3137 	if ((stcb == NULL) ||
3138 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3139 		/* event not enabled */
3140 		return;
3141 	}
3142 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3143 	if (m_notify == NULL)
3144 		/* no space left */
3145 		return;
3146 	SCTP_BUF_LEN(m_notify) = 0;
3147 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3148 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3149 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3150 	sai->sai_flags = 0;
3151 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3152 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3153 	sai->sai_assoc_id = sctp_get_associd(stcb);
3154 
3155 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3156 	SCTP_BUF_NEXT(m_notify) = NULL;
3157 
3158 	/* append to socket */
3159 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3160 	    0, 0, stcb->asoc.context, 0, 0, 0,
3161 	    m_notify);
3162 	if (control == NULL) {
3163 		/* no memory */
3164 		sctp_m_freem(m_notify);
3165 		return;
3166 	}
3167 	control->length = SCTP_BUF_LEN(m_notify);
3168 	control->spec_flags = M_NOTIFICATION;
3169 	/* not that we need this */
3170 	control->tail_mbuf = m_notify;
3171 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3172 	    control,
3173 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3174 }
3175 
3176 /* This always must be called with the read-queue LOCKED in the INP */
3177 static void
3178 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3179     uint32_t val, int so_locked
3180 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3181     SCTP_UNUSED
3182 #endif
3183 )
3184 {
3185 	struct mbuf *m_notify;
3186 	struct sctp_pdapi_event *pdapi;
3187 	struct sctp_queued_to_read *control;
3188 	struct sockbuf *sb;
3189 
3190 	if ((stcb == NULL) ||
3191 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3192 		/* event not enabled */
3193 		return;
3194 	}
3195 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3196 		return;
3197 	}
3198 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3199 	if (m_notify == NULL)
3200 		/* no space left */
3201 		return;
3202 	SCTP_BUF_LEN(m_notify) = 0;
3203 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3204 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3205 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3206 	pdapi->pdapi_flags = 0;
3207 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3208 	pdapi->pdapi_indication = error;
3209 	pdapi->pdapi_stream = (val >> 16);
3210 	pdapi->pdapi_seq = (val & 0x0000ffff);
3211 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3212 
3213 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3214 	SCTP_BUF_NEXT(m_notify) = NULL;
3215 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3216 	    0, 0, stcb->asoc.context, 0, 0, 0,
3217 	    m_notify);
3218 	if (control == NULL) {
3219 		/* no memory */
3220 		sctp_m_freem(m_notify);
3221 		return;
3222 	}
3223 	control->length = SCTP_BUF_LEN(m_notify);
3224 	control->spec_flags = M_NOTIFICATION;
3225 	/* not that we need this */
3226 	control->tail_mbuf = m_notify;
3227 	sb = &stcb->sctp_socket->so_rcv;
3228 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3229 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3230 	}
3231 	sctp_sballoc(stcb, sb, m_notify);
3232 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3233 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3234 	}
3235 	control->end_added = 1;
3236 	if (stcb->asoc.control_pdapi)
3237 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3238 	else {
3239 		/* we really should not see this case */
3240 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3241 	}
3242 	if (stcb->sctp_ep && stcb->sctp_socket) {
3243 		/* This should always be the case */
3244 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3245 		struct socket *so;
3246 
3247 		so = SCTP_INP_SO(stcb->sctp_ep);
3248 		if (!so_locked) {
3249 			atomic_add_int(&stcb->asoc.refcnt, 1);
3250 			SCTP_TCB_UNLOCK(stcb);
3251 			SCTP_SOCKET_LOCK(so, 1);
3252 			SCTP_TCB_LOCK(stcb);
3253 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3254 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3255 				SCTP_SOCKET_UNLOCK(so, 1);
3256 				return;
3257 			}
3258 		}
3259 #endif
3260 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3261 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3262 		if (!so_locked) {
3263 			SCTP_SOCKET_UNLOCK(so, 1);
3264 		}
3265 #endif
3266 	}
3267 }
3268 
3269 static void
3270 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3271 {
3272 	struct mbuf *m_notify;
3273 	struct sctp_shutdown_event *sse;
3274 	struct sctp_queued_to_read *control;
3275 
3276 	/*
3277 	 * For TCP model AND UDP connected sockets we will send an error up
3278 	 * when an SHUTDOWN completes
3279 	 */
3280 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3281 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3282 		/* mark socket closed for read/write and wakeup! */
3283 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3284 		struct socket *so;
3285 
3286 		so = SCTP_INP_SO(stcb->sctp_ep);
3287 		atomic_add_int(&stcb->asoc.refcnt, 1);
3288 		SCTP_TCB_UNLOCK(stcb);
3289 		SCTP_SOCKET_LOCK(so, 1);
3290 		SCTP_TCB_LOCK(stcb);
3291 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3292 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3293 			SCTP_SOCKET_UNLOCK(so, 1);
3294 			return;
3295 		}
3296 #endif
3297 		socantsendmore(stcb->sctp_socket);
3298 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3299 		SCTP_SOCKET_UNLOCK(so, 1);
3300 #endif
3301 	}
3302 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3303 		/* event not enabled */
3304 		return;
3305 	}
3306 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3307 	if (m_notify == NULL)
3308 		/* no space left */
3309 		return;
3310 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3311 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3312 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3313 	sse->sse_flags = 0;
3314 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3315 	sse->sse_assoc_id = sctp_get_associd(stcb);
3316 
3317 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3318 	SCTP_BUF_NEXT(m_notify) = NULL;
3319 
3320 	/* append to socket */
3321 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3322 	    0, 0, stcb->asoc.context, 0, 0, 0,
3323 	    m_notify);
3324 	if (control == NULL) {
3325 		/* no memory */
3326 		sctp_m_freem(m_notify);
3327 		return;
3328 	}
3329 	control->length = SCTP_BUF_LEN(m_notify);
3330 	control->spec_flags = M_NOTIFICATION;
3331 	/* not that we need this */
3332 	control->tail_mbuf = m_notify;
3333 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3334 	    control,
3335 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3336 }
3337 
3338 static void
3339 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3340     int so_locked
3341 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3342     SCTP_UNUSED
3343 #endif
3344 )
3345 {
3346 	struct mbuf *m_notify;
3347 	struct sctp_sender_dry_event *event;
3348 	struct sctp_queued_to_read *control;
3349 
3350 	if ((stcb == NULL) ||
3351 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3352 		/* event not enabled */
3353 		return;
3354 	}
3355 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3356 	if (m_notify == NULL) {
3357 		/* no space left */
3358 		return;
3359 	}
3360 	SCTP_BUF_LEN(m_notify) = 0;
3361 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3362 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3363 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3364 	event->sender_dry_flags = 0;
3365 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3366 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3367 
3368 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3369 	SCTP_BUF_NEXT(m_notify) = NULL;
3370 
3371 	/* append to socket */
3372 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3373 	    0, 0, stcb->asoc.context, 0, 0, 0,
3374 	    m_notify);
3375 	if (control == NULL) {
3376 		/* no memory */
3377 		sctp_m_freem(m_notify);
3378 		return;
3379 	}
3380 	control->length = SCTP_BUF_LEN(m_notify);
3381 	control->spec_flags = M_NOTIFICATION;
3382 	/* not that we need this */
3383 	control->tail_mbuf = m_notify;
3384 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3385 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3386 }
3387 
3388 
3389 void
3390 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3391 {
3392 	struct mbuf *m_notify;
3393 	struct sctp_queued_to_read *control;
3394 	struct sctp_stream_change_event *stradd;
3395 
3396 	if ((stcb == NULL) ||
3397 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3398 		/* event not enabled */
3399 		return;
3400 	}
3401 	if ((stcb->asoc.peer_req_out) && flag) {
3402 		/* Peer made the request, don't tell the local user */
3403 		stcb->asoc.peer_req_out = 0;
3404 		return;
3405 	}
3406 	stcb->asoc.peer_req_out = 0;
3407 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3408 	if (m_notify == NULL)
3409 		/* no space left */
3410 		return;
3411 	SCTP_BUF_LEN(m_notify) = 0;
3412 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3413 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3414 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3415 	stradd->strchange_flags = flag;
3416 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3417 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3418 	stradd->strchange_instrms = numberin;
3419 	stradd->strchange_outstrms = numberout;
3420 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3421 	SCTP_BUF_NEXT(m_notify) = NULL;
3422 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3423 		/* no space */
3424 		sctp_m_freem(m_notify);
3425 		return;
3426 	}
3427 	/* append to socket */
3428 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3429 	    0, 0, stcb->asoc.context, 0, 0, 0,
3430 	    m_notify);
3431 	if (control == NULL) {
3432 		/* no memory */
3433 		sctp_m_freem(m_notify);
3434 		return;
3435 	}
3436 	control->length = SCTP_BUF_LEN(m_notify);
3437 	control->spec_flags = M_NOTIFICATION;
3438 	/* not that we need this */
3439 	control->tail_mbuf = m_notify;
3440 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3441 	    control,
3442 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3443 }
3444 
3445 void
3446 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3447 {
3448 	struct mbuf *m_notify;
3449 	struct sctp_queued_to_read *control;
3450 	struct sctp_assoc_reset_event *strasoc;
3451 
3452 	if ((stcb == NULL) ||
3453 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3454 		/* event not enabled */
3455 		return;
3456 	}
3457 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3458 	if (m_notify == NULL)
3459 		/* no space left */
3460 		return;
3461 	SCTP_BUF_LEN(m_notify) = 0;
3462 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3463 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3464 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3465 	strasoc->assocreset_flags = flag;
3466 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3467 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3468 	strasoc->assocreset_local_tsn = sending_tsn;
3469 	strasoc->assocreset_remote_tsn = recv_tsn;
3470 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3471 	SCTP_BUF_NEXT(m_notify) = NULL;
3472 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3473 		/* no space */
3474 		sctp_m_freem(m_notify);
3475 		return;
3476 	}
3477 	/* append to socket */
3478 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3479 	    0, 0, stcb->asoc.context, 0, 0, 0,
3480 	    m_notify);
3481 	if (control == NULL) {
3482 		/* no memory */
3483 		sctp_m_freem(m_notify);
3484 		return;
3485 	}
3486 	control->length = SCTP_BUF_LEN(m_notify);
3487 	control->spec_flags = M_NOTIFICATION;
3488 	/* not that we need this */
3489 	control->tail_mbuf = m_notify;
3490 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3491 	    control,
3492 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3493 }
3494 
3495 
3496 
3497 static void
3498 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3499     int number_entries, uint16_t *list, int flag)
3500 {
3501 	struct mbuf *m_notify;
3502 	struct sctp_queued_to_read *control;
3503 	struct sctp_stream_reset_event *strreset;
3504 	int len;
3505 
3506 	if ((stcb == NULL) ||
3507 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3508 		/* event not enabled */
3509 		return;
3510 	}
3511 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3512 	if (m_notify == NULL)
3513 		/* no space left */
3514 		return;
3515 	SCTP_BUF_LEN(m_notify) = 0;
3516 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3517 	if (len > M_TRAILINGSPACE(m_notify)) {
3518 		/* never enough room */
3519 		sctp_m_freem(m_notify);
3520 		return;
3521 	}
3522 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3523 	memset(strreset, 0, len);
3524 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3525 	strreset->strreset_flags = flag;
3526 	strreset->strreset_length = len;
3527 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3528 	if (number_entries) {
3529 		int i;
3530 
3531 		for (i = 0; i < number_entries; i++) {
3532 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3533 		}
3534 	}
3535 	SCTP_BUF_LEN(m_notify) = len;
3536 	SCTP_BUF_NEXT(m_notify) = NULL;
3537 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3538 		/* no space */
3539 		sctp_m_freem(m_notify);
3540 		return;
3541 	}
3542 	/* append to socket */
3543 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3544 	    0, 0, stcb->asoc.context, 0, 0, 0,
3545 	    m_notify);
3546 	if (control == NULL) {
3547 		/* no memory */
3548 		sctp_m_freem(m_notify);
3549 		return;
3550 	}
3551 	control->length = SCTP_BUF_LEN(m_notify);
3552 	control->spec_flags = M_NOTIFICATION;
3553 	/* not that we need this */
3554 	control->tail_mbuf = m_notify;
3555 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3556 	    control,
3557 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3558 }
3559 
3560 
3561 static void
3562 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3563 {
3564 	struct mbuf *m_notify;
3565 	struct sctp_remote_error *sre;
3566 	struct sctp_queued_to_read *control;
3567 	unsigned int notif_len;
3568 	uint16_t chunk_len;
3569 
3570 	if ((stcb == NULL) ||
3571 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3572 		return;
3573 	}
3574 	if (chunk != NULL) {
3575 		chunk_len = ntohs(chunk->ch.chunk_length);
3576 	} else {
3577 		chunk_len = 0;
3578 	}
3579 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3580 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3581 	if (m_notify == NULL) {
3582 		/* Retry with smaller value. */
3583 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3584 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3585 		if (m_notify == NULL) {
3586 			return;
3587 		}
3588 	}
3589 	SCTP_BUF_NEXT(m_notify) = NULL;
3590 	sre = mtod(m_notify, struct sctp_remote_error *);
3591 	memset(sre, 0, notif_len);
3592 	sre->sre_type = SCTP_REMOTE_ERROR;
3593 	sre->sre_flags = 0;
3594 	sre->sre_length = sizeof(struct sctp_remote_error);
3595 	sre->sre_error = error;
3596 	sre->sre_assoc_id = sctp_get_associd(stcb);
3597 	if (notif_len > sizeof(struct sctp_remote_error)) {
3598 		memcpy(sre->sre_data, chunk, chunk_len);
3599 		sre->sre_length += chunk_len;
3600 	}
3601 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3602 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3603 	    0, 0, stcb->asoc.context, 0, 0, 0,
3604 	    m_notify);
3605 	if (control != NULL) {
3606 		control->length = SCTP_BUF_LEN(m_notify);
3607 		control->spec_flags = M_NOTIFICATION;
3608 		/* not that we need this */
3609 		control->tail_mbuf = m_notify;
3610 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3611 		    control,
3612 		    &stcb->sctp_socket->so_rcv, 1,
3613 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3614 	} else {
3615 		sctp_m_freem(m_notify);
3616 	}
3617 }
3618 
3619 
3620 void
3621 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3622     uint32_t error, void *data, int so_locked
3623 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3624     SCTP_UNUSED
3625 #endif
3626 )
3627 {
3628 	if ((stcb == NULL) ||
3629 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3630 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3631 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3632 		/* If the socket is gone we are out of here */
3633 		return;
3634 	}
3635 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3636 		return;
3637 	}
3638 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3639 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3640 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3641 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3642 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3643 			/* Don't report these in front states */
3644 			return;
3645 		}
3646 	}
3647 	switch (notification) {
3648 	case SCTP_NOTIFY_ASSOC_UP:
3649 		if (stcb->asoc.assoc_up_sent == 0) {
3650 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3651 			stcb->asoc.assoc_up_sent = 1;
3652 		}
3653 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3654 			sctp_notify_adaptation_layer(stcb);
3655 		}
3656 		if (stcb->asoc.auth_supported == 0) {
3657 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3658 			    NULL, so_locked);
3659 		}
3660 		break;
3661 	case SCTP_NOTIFY_ASSOC_DOWN:
3662 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3663 		break;
3664 	case SCTP_NOTIFY_INTERFACE_DOWN:
3665 		{
3666 			struct sctp_nets *net;
3667 
3668 			net = (struct sctp_nets *)data;
3669 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3670 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3671 			break;
3672 		}
3673 	case SCTP_NOTIFY_INTERFACE_UP:
3674 		{
3675 			struct sctp_nets *net;
3676 
3677 			net = (struct sctp_nets *)data;
3678 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3679 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3680 			break;
3681 		}
3682 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3683 		{
3684 			struct sctp_nets *net;
3685 
3686 			net = (struct sctp_nets *)data;
3687 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3688 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3689 			break;
3690 		}
3691 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3692 		sctp_notify_send_failed2(stcb, error,
3693 		    (struct sctp_stream_queue_pending *)data, so_locked);
3694 		break;
3695 	case SCTP_NOTIFY_SENT_DG_FAIL:
3696 		sctp_notify_send_failed(stcb, 1, error,
3697 		    (struct sctp_tmit_chunk *)data, so_locked);
3698 		break;
3699 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3700 		sctp_notify_send_failed(stcb, 0, error,
3701 		    (struct sctp_tmit_chunk *)data, so_locked);
3702 		break;
3703 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3704 		{
3705 			uint32_t val;
3706 
3707 			val = *((uint32_t *)data);
3708 
3709 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3710 			break;
3711 		}
3712 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3713 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3714 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3715 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3716 		} else {
3717 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3718 		}
3719 		break;
3720 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3721 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3722 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3723 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3724 		} else {
3725 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3726 		}
3727 		break;
3728 	case SCTP_NOTIFY_ASSOC_RESTART:
3729 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3730 		if (stcb->asoc.auth_supported == 0) {
3731 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3732 			    NULL, so_locked);
3733 		}
3734 		break;
3735 	case SCTP_NOTIFY_STR_RESET_SEND:
3736 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
3737 		break;
3738 	case SCTP_NOTIFY_STR_RESET_RECV:
3739 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
3740 		break;
3741 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3742 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3743 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3744 		break;
3745 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3746 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3747 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3748 		break;
3749 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3750 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3751 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3752 		break;
3753 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3754 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3755 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3756 		break;
3757 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3758 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3759 		    error, so_locked);
3760 		break;
3761 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3762 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3763 		    error, so_locked);
3764 		break;
3765 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3766 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3767 		    error, so_locked);
3768 		break;
3769 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3770 		sctp_notify_shutdown_event(stcb);
3771 		break;
3772 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3773 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3774 		    (uint16_t)(uintptr_t)data,
3775 		    so_locked);
3776 		break;
3777 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3778 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3779 		    (uint16_t)(uintptr_t)data,
3780 		    so_locked);
3781 		break;
3782 	case SCTP_NOTIFY_NO_PEER_AUTH:
3783 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3784 		    (uint16_t)(uintptr_t)data,
3785 		    so_locked);
3786 		break;
3787 	case SCTP_NOTIFY_SENDER_DRY:
3788 		sctp_notify_sender_dry_event(stcb, so_locked);
3789 		break;
3790 	case SCTP_NOTIFY_REMOTE_ERROR:
3791 		sctp_notify_remote_error(stcb, error, data);
3792 		break;
3793 	default:
3794 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3795 		    __func__, notification, notification);
3796 		break;
3797 	}			/* end switch */
3798 }
3799 
3800 void
3801 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3802 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3803     SCTP_UNUSED
3804 #endif
3805 )
3806 {
3807 	struct sctp_association *asoc;
3808 	struct sctp_stream_out *outs;
3809 	struct sctp_tmit_chunk *chk, *nchk;
3810 	struct sctp_stream_queue_pending *sp, *nsp;
3811 	int i;
3812 
3813 	if (stcb == NULL) {
3814 		return;
3815 	}
3816 	asoc = &stcb->asoc;
3817 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3818 		/* already being freed */
3819 		return;
3820 	}
3821 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3822 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3823 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3824 		return;
3825 	}
3826 	/* now through all the gunk freeing chunks */
3827 	if (holds_lock == 0) {
3828 		SCTP_TCB_SEND_LOCK(stcb);
3829 	}
3830 	/* sent queue SHOULD be empty */
3831 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3832 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3833 		asoc->sent_queue_cnt--;
3834 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3835 			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3836 				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3837 #ifdef INVARIANTS
3838 			} else {
3839 				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3840 #endif
3841 			}
3842 		}
3843 		if (chk->data != NULL) {
3844 			sctp_free_bufspace(stcb, asoc, chk, 1);
3845 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3846 			    error, chk, so_locked);
3847 			if (chk->data) {
3848 				sctp_m_freem(chk->data);
3849 				chk->data = NULL;
3850 			}
3851 		}
3852 		sctp_free_a_chunk(stcb, chk, so_locked);
3853 		/* sa_ignore FREED_MEMORY */
3854 	}
3855 	/* pending send queue SHOULD be empty */
3856 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3857 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3858 		asoc->send_queue_cnt--;
3859 		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3860 			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3861 #ifdef INVARIANTS
3862 		} else {
3863 			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3864 #endif
3865 		}
3866 		if (chk->data != NULL) {
3867 			sctp_free_bufspace(stcb, asoc, chk, 1);
3868 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3869 			    error, chk, so_locked);
3870 			if (chk->data) {
3871 				sctp_m_freem(chk->data);
3872 				chk->data = NULL;
3873 			}
3874 		}
3875 		sctp_free_a_chunk(stcb, chk, so_locked);
3876 		/* sa_ignore FREED_MEMORY */
3877 	}
3878 	for (i = 0; i < asoc->streamoutcnt; i++) {
3879 		/* For each stream */
3880 		outs = &asoc->strmout[i];
3881 		/* clean up any sends there */
3882 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3883 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
3884 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3885 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
3886 			sctp_free_spbufspace(stcb, asoc, sp);
3887 			if (sp->data) {
3888 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3889 				    error, (void *)sp, so_locked);
3890 				if (sp->data) {
3891 					sctp_m_freem(sp->data);
3892 					sp->data = NULL;
3893 					sp->tail_mbuf = NULL;
3894 					sp->length = 0;
3895 				}
3896 			}
3897 			if (sp->net) {
3898 				sctp_free_remote_addr(sp->net);
3899 				sp->net = NULL;
3900 			}
3901 			/* Free the chunk */
3902 			sctp_free_a_strmoq(stcb, sp, so_locked);
3903 			/* sa_ignore FREED_MEMORY */
3904 		}
3905 	}
3906 
3907 	if (holds_lock == 0) {
3908 		SCTP_TCB_SEND_UNLOCK(stcb);
3909 	}
3910 }
3911 
3912 void
3913 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3914     struct sctp_abort_chunk *abort, int so_locked
3915 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3916     SCTP_UNUSED
3917 #endif
3918 )
3919 {
3920 	if (stcb == NULL) {
3921 		return;
3922 	}
3923 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3924 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3925 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3926 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3927 	}
3928 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3929 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3930 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3931 		return;
3932 	}
3933 	/* Tell them we lost the asoc */
3934 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3935 	if (from_peer) {
3936 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3937 	} else {
3938 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3939 	}
3940 }
3941 
3942 void
3943 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3944     struct mbuf *m, int iphlen,
3945     struct sockaddr *src, struct sockaddr *dst,
3946     struct sctphdr *sh, struct mbuf *op_err,
3947     uint8_t mflowtype, uint32_t mflowid,
3948     uint32_t vrf_id, uint16_t port)
3949 {
3950 	uint32_t vtag;
3951 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3952 	struct socket *so;
3953 #endif
3954 
3955 	vtag = 0;
3956 	if (stcb != NULL) {
3957 		vtag = stcb->asoc.peer_vtag;
3958 		vrf_id = stcb->asoc.vrf_id;
3959 	}
3960 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
3961 	    mflowtype, mflowid, inp->fibnum,
3962 	    vrf_id, port);
3963 	if (stcb != NULL) {
3964 		/* We have a TCB to abort, send notification too */
3965 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
3966 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3967 		/* Ok, now lets free it */
3968 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3969 		so = SCTP_INP_SO(inp);
3970 		atomic_add_int(&stcb->asoc.refcnt, 1);
3971 		SCTP_TCB_UNLOCK(stcb);
3972 		SCTP_SOCKET_LOCK(so, 1);
3973 		SCTP_TCB_LOCK(stcb);
3974 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3975 #endif
3976 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3977 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3978 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3979 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3980 		}
3981 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
3982 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
3983 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3984 		SCTP_SOCKET_UNLOCK(so, 1);
3985 #endif
3986 	}
3987 }
3988 #ifdef SCTP_ASOCLOG_OF_TSNS
3989 void
3990 sctp_print_out_track_log(struct sctp_tcb *stcb)
3991 {
3992 #ifdef NOSIY_PRINTS
3993 	int i;
3994 
3995 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
3996 	SCTP_PRINTF("IN bound TSN log-aaa\n");
3997 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
3998 		SCTP_PRINTF("None rcvd\n");
3999 		goto none_in;
4000 	}
4001 	if (stcb->asoc.tsn_in_wrapped) {
4002 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4003 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4004 			    stcb->asoc.in_tsnlog[i].tsn,
4005 			    stcb->asoc.in_tsnlog[i].strm,
4006 			    stcb->asoc.in_tsnlog[i].seq,
4007 			    stcb->asoc.in_tsnlog[i].flgs,
4008 			    stcb->asoc.in_tsnlog[i].sz);
4009 		}
4010 	}
4011 	if (stcb->asoc.tsn_in_at) {
4012 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4013 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4014 			    stcb->asoc.in_tsnlog[i].tsn,
4015 			    stcb->asoc.in_tsnlog[i].strm,
4016 			    stcb->asoc.in_tsnlog[i].seq,
4017 			    stcb->asoc.in_tsnlog[i].flgs,
4018 			    stcb->asoc.in_tsnlog[i].sz);
4019 		}
4020 	}
4021 none_in:
4022 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4023 	if ((stcb->asoc.tsn_out_at == 0) &&
4024 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4025 		SCTP_PRINTF("None sent\n");
4026 	}
4027 	if (stcb->asoc.tsn_out_wrapped) {
4028 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4029 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4030 			    stcb->asoc.out_tsnlog[i].tsn,
4031 			    stcb->asoc.out_tsnlog[i].strm,
4032 			    stcb->asoc.out_tsnlog[i].seq,
4033 			    stcb->asoc.out_tsnlog[i].flgs,
4034 			    stcb->asoc.out_tsnlog[i].sz);
4035 		}
4036 	}
4037 	if (stcb->asoc.tsn_out_at) {
4038 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4039 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4040 			    stcb->asoc.out_tsnlog[i].tsn,
4041 			    stcb->asoc.out_tsnlog[i].strm,
4042 			    stcb->asoc.out_tsnlog[i].seq,
4043 			    stcb->asoc.out_tsnlog[i].flgs,
4044 			    stcb->asoc.out_tsnlog[i].sz);
4045 		}
4046 	}
4047 #endif
4048 }
4049 #endif
4050 
4051 void
4052 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4053     struct mbuf *op_err,
4054     int so_locked
4055 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4056     SCTP_UNUSED
4057 #endif
4058 )
4059 {
4060 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4061 	struct socket *so;
4062 #endif
4063 
4064 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4065 	so = SCTP_INP_SO(inp);
4066 #endif
4067 	if (stcb == NULL) {
4068 		/* Got to have a TCB */
4069 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4070 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4071 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4072 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4073 			}
4074 		}
4075 		return;
4076 	} else {
4077 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4078 	}
4079 	/* notify the peer */
4080 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4081 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4082 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4083 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4084 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4085 	}
4086 	/* notify the ulp */
4087 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4088 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4089 	}
4090 	/* now free the asoc */
4091 #ifdef SCTP_ASOCLOG_OF_TSNS
4092 	sctp_print_out_track_log(stcb);
4093 #endif
4094 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4095 	if (!so_locked) {
4096 		atomic_add_int(&stcb->asoc.refcnt, 1);
4097 		SCTP_TCB_UNLOCK(stcb);
4098 		SCTP_SOCKET_LOCK(so, 1);
4099 		SCTP_TCB_LOCK(stcb);
4100 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4101 	}
4102 #endif
4103 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4104 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4105 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4106 	if (!so_locked) {
4107 		SCTP_SOCKET_UNLOCK(so, 1);
4108 	}
4109 #endif
4110 }
4111 
4112 void
4113 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4114     struct sockaddr *src, struct sockaddr *dst,
4115     struct sctphdr *sh, struct sctp_inpcb *inp,
4116     struct mbuf *cause,
4117     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4118     uint32_t vrf_id, uint16_t port)
4119 {
4120 	struct sctp_chunkhdr *ch, chunk_buf;
4121 	unsigned int chk_length;
4122 	int contains_init_chunk;
4123 
4124 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4125 	/* Generate a TO address for future reference */
4126 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4127 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4128 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4129 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4130 		}
4131 	}
4132 	contains_init_chunk = 0;
4133 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4134 	    sizeof(*ch), (uint8_t *)&chunk_buf);
4135 	while (ch != NULL) {
4136 		chk_length = ntohs(ch->chunk_length);
4137 		if (chk_length < sizeof(*ch)) {
4138 			/* break to abort land */
4139 			break;
4140 		}
4141 		switch (ch->chunk_type) {
4142 		case SCTP_INIT:
4143 			contains_init_chunk = 1;
4144 			break;
4145 		case SCTP_PACKET_DROPPED:
4146 			/* we don't respond to pkt-dropped */
4147 			return;
4148 		case SCTP_ABORT_ASSOCIATION:
4149 			/* we don't respond with an ABORT to an ABORT */
4150 			return;
4151 		case SCTP_SHUTDOWN_COMPLETE:
4152 			/*
4153 			 * we ignore it since we are not waiting for it and
4154 			 * peer is gone
4155 			 */
4156 			return;
4157 		case SCTP_SHUTDOWN_ACK:
4158 			sctp_send_shutdown_complete2(src, dst, sh,
4159 			    mflowtype, mflowid, fibnum,
4160 			    vrf_id, port);
4161 			return;
4162 		default:
4163 			break;
4164 		}
4165 		offset += SCTP_SIZE32(chk_length);
4166 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4167 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4168 	}
4169 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4170 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4171 	    (contains_init_chunk == 0))) {
4172 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4173 		    mflowtype, mflowid, fibnum,
4174 		    vrf_id, port);
4175 	}
4176 }
4177 
4178 /*
4179  * check the inbound datagram to make sure there is not an abort inside it,
4180  * if there is return 1, else return 0.
4181  */
4182 int
4183 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4184 {
4185 	struct sctp_chunkhdr *ch;
4186 	struct sctp_init_chunk *init_chk, chunk_buf;
4187 	int offset;
4188 	unsigned int chk_length;
4189 
4190 	offset = iphlen + sizeof(struct sctphdr);
4191 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4192 	    (uint8_t *)&chunk_buf);
4193 	while (ch != NULL) {
4194 		chk_length = ntohs(ch->chunk_length);
4195 		if (chk_length < sizeof(*ch)) {
4196 			/* packet is probably corrupt */
4197 			break;
4198 		}
4199 		/* we seem to be ok, is it an abort? */
4200 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4201 			/* yep, tell them */
4202 			return (1);
4203 		}
4204 		if (ch->chunk_type == SCTP_INITIATION) {
4205 			/* need to update the Vtag */
4206 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4207 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4208 			if (init_chk != NULL) {
4209 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4210 			}
4211 		}
4212 		/* Nope, move to the next chunk */
4213 		offset += SCTP_SIZE32(chk_length);
4214 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4215 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4216 	}
4217 	return (0);
4218 }
4219 
4220 /*
4221  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4222  * set (i.e. it's 0) so, create this function to compare link local scopes
4223  */
4224 #ifdef INET6
4225 uint32_t
4226 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4227 {
4228 	struct sockaddr_in6 a, b;
4229 
4230 	/* save copies */
4231 	a = *addr1;
4232 	b = *addr2;
4233 
4234 	if (a.sin6_scope_id == 0)
4235 		if (sa6_recoverscope(&a)) {
4236 			/* can't get scope, so can't match */
4237 			return (0);
4238 		}
4239 	if (b.sin6_scope_id == 0)
4240 		if (sa6_recoverscope(&b)) {
4241 			/* can't get scope, so can't match */
4242 			return (0);
4243 		}
4244 	if (a.sin6_scope_id != b.sin6_scope_id)
4245 		return (0);
4246 
4247 	return (1);
4248 }
4249 
4250 /*
4251  * returns a sockaddr_in6 with embedded scope recovered and removed
4252  */
4253 struct sockaddr_in6 *
4254 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4255 {
4256 	/* check and strip embedded scope junk */
4257 	if (addr->sin6_family == AF_INET6) {
4258 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4259 			if (addr->sin6_scope_id == 0) {
4260 				*store = *addr;
4261 				if (!sa6_recoverscope(store)) {
4262 					/* use the recovered scope */
4263 					addr = store;
4264 				}
4265 			} else {
4266 				/* else, return the original "to" addr */
4267 				in6_clearscope(&addr->sin6_addr);
4268 			}
4269 		}
4270 	}
4271 	return (addr);
4272 }
4273 #endif
4274 
4275 /*
4276  * are the two addresses the same?  currently a "scopeless" check returns: 1
4277  * if same, 0 if not
4278  */
4279 int
4280 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4281 {
4282 
4283 	/* must be valid */
4284 	if (sa1 == NULL || sa2 == NULL)
4285 		return (0);
4286 
4287 	/* must be the same family */
4288 	if (sa1->sa_family != sa2->sa_family)
4289 		return (0);
4290 
4291 	switch (sa1->sa_family) {
4292 #ifdef INET6
4293 	case AF_INET6:
4294 		{
4295 			/* IPv6 addresses */
4296 			struct sockaddr_in6 *sin6_1, *sin6_2;
4297 
4298 			sin6_1 = (struct sockaddr_in6 *)sa1;
4299 			sin6_2 = (struct sockaddr_in6 *)sa2;
4300 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4301 			    sin6_2));
4302 		}
4303 #endif
4304 #ifdef INET
4305 	case AF_INET:
4306 		{
4307 			/* IPv4 addresses */
4308 			struct sockaddr_in *sin_1, *sin_2;
4309 
4310 			sin_1 = (struct sockaddr_in *)sa1;
4311 			sin_2 = (struct sockaddr_in *)sa2;
4312 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4313 		}
4314 #endif
4315 	default:
4316 		/* we don't do these... */
4317 		return (0);
4318 	}
4319 }
4320 
4321 void
4322 sctp_print_address(struct sockaddr *sa)
4323 {
4324 #ifdef INET6
4325 	char ip6buf[INET6_ADDRSTRLEN];
4326 #endif
4327 
4328 	switch (sa->sa_family) {
4329 #ifdef INET6
4330 	case AF_INET6:
4331 		{
4332 			struct sockaddr_in6 *sin6;
4333 
4334 			sin6 = (struct sockaddr_in6 *)sa;
4335 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4336 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4337 			    ntohs(sin6->sin6_port),
4338 			    sin6->sin6_scope_id);
4339 			break;
4340 		}
4341 #endif
4342 #ifdef INET
4343 	case AF_INET:
4344 		{
4345 			struct sockaddr_in *sin;
4346 			unsigned char *p;
4347 
4348 			sin = (struct sockaddr_in *)sa;
4349 			p = (unsigned char *)&sin->sin_addr;
4350 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4351 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4352 			break;
4353 		}
4354 #endif
4355 	default:
4356 		SCTP_PRINTF("?\n");
4357 		break;
4358 	}
4359 }
4360 
4361 void
4362 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4363     struct sctp_inpcb *new_inp,
4364     struct sctp_tcb *stcb,
4365     int waitflags)
4366 {
4367 	/*
4368 	 * go through our old INP and pull off any control structures that
4369 	 * belong to stcb and move then to the new inp.
4370 	 */
4371 	struct socket *old_so, *new_so;
4372 	struct sctp_queued_to_read *control, *nctl;
4373 	struct sctp_readhead tmp_queue;
4374 	struct mbuf *m;
4375 	int error = 0;
4376 
4377 	old_so = old_inp->sctp_socket;
4378 	new_so = new_inp->sctp_socket;
4379 	TAILQ_INIT(&tmp_queue);
4380 	error = sblock(&old_so->so_rcv, waitflags);
4381 	if (error) {
4382 		/*
4383 		 * Gak, can't get sblock, we have a problem. data will be
4384 		 * left stranded.. and we don't dare look at it since the
4385 		 * other thread may be reading something. Oh well, its a
4386 		 * screwed up app that does a peeloff OR a accept while
4387 		 * reading from the main socket... actually its only the
4388 		 * peeloff() case, since I think read will fail on a
4389 		 * listening socket..
4390 		 */
4391 		return;
4392 	}
4393 	/* lock the socket buffers */
4394 	SCTP_INP_READ_LOCK(old_inp);
4395 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4396 		/* Pull off all for out target stcb */
4397 		if (control->stcb == stcb) {
4398 			/* remove it we want it */
4399 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4400 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4401 			m = control->data;
4402 			while (m) {
4403 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4404 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4405 				}
4406 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4407 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4408 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4409 				}
4410 				m = SCTP_BUF_NEXT(m);
4411 			}
4412 		}
4413 	}
4414 	SCTP_INP_READ_UNLOCK(old_inp);
4415 	/* Remove the sb-lock on the old socket */
4416 
4417 	sbunlock(&old_so->so_rcv);
4418 	/* Now we move them over to the new socket buffer */
4419 	SCTP_INP_READ_LOCK(new_inp);
4420 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4421 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4422 		m = control->data;
4423 		while (m) {
4424 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4425 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4426 			}
4427 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4428 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4429 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4430 			}
4431 			m = SCTP_BUF_NEXT(m);
4432 		}
4433 	}
4434 	SCTP_INP_READ_UNLOCK(new_inp);
4435 }
4436 
4437 void
4438 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4439     struct sctp_tcb *stcb,
4440     int so_locked
4441 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4442     SCTP_UNUSED
4443 #endif
4444 )
4445 {
4446 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4447 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4448 		struct socket *so;
4449 
4450 		so = SCTP_INP_SO(inp);
4451 		if (!so_locked) {
4452 			if (stcb) {
4453 				atomic_add_int(&stcb->asoc.refcnt, 1);
4454 				SCTP_TCB_UNLOCK(stcb);
4455 			}
4456 			SCTP_SOCKET_LOCK(so, 1);
4457 			if (stcb) {
4458 				SCTP_TCB_LOCK(stcb);
4459 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4460 			}
4461 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4462 				SCTP_SOCKET_UNLOCK(so, 1);
4463 				return;
4464 			}
4465 		}
4466 #endif
4467 		sctp_sorwakeup(inp, inp->sctp_socket);
4468 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4469 		if (!so_locked) {
4470 			SCTP_SOCKET_UNLOCK(so, 1);
4471 		}
4472 #endif
4473 	}
4474 }
4475 
4476 void
4477 sctp_add_to_readq(struct sctp_inpcb *inp,
4478     struct sctp_tcb *stcb,
4479     struct sctp_queued_to_read *control,
4480     struct sockbuf *sb,
4481     int end,
4482     int inp_read_lock_held,
4483     int so_locked
4484 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4485     SCTP_UNUSED
4486 #endif
4487 )
4488 {
4489 	/*
4490 	 * Here we must place the control on the end of the socket read
4491 	 * queue AND increment sb_cc so that select will work properly on
4492 	 * read.
4493 	 */
4494 	struct mbuf *m, *prev = NULL;
4495 
4496 	if (inp == NULL) {
4497 		/* Gak, TSNH!! */
4498 #ifdef INVARIANTS
4499 		panic("Gak, inp NULL on add_to_readq");
4500 #endif
4501 		return;
4502 	}
4503 	if (inp_read_lock_held == 0)
4504 		SCTP_INP_READ_LOCK(inp);
4505 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4506 		sctp_free_remote_addr(control->whoFrom);
4507 		if (control->data) {
4508 			sctp_m_freem(control->data);
4509 			control->data = NULL;
4510 		}
4511 		sctp_free_a_readq(stcb, control);
4512 		if (inp_read_lock_held == 0)
4513 			SCTP_INP_READ_UNLOCK(inp);
4514 		return;
4515 	}
4516 	if (!(control->spec_flags & M_NOTIFICATION)) {
4517 		atomic_add_int(&inp->total_recvs, 1);
4518 		if (!control->do_not_ref_stcb) {
4519 			atomic_add_int(&stcb->total_recvs, 1);
4520 		}
4521 	}
4522 	m = control->data;
4523 	control->held_length = 0;
4524 	control->length = 0;
4525 	while (m) {
4526 		if (SCTP_BUF_LEN(m) == 0) {
4527 			/* Skip mbufs with NO length */
4528 			if (prev == NULL) {
4529 				/* First one */
4530 				control->data = sctp_m_free(m);
4531 				m = control->data;
4532 			} else {
4533 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4534 				m = SCTP_BUF_NEXT(prev);
4535 			}
4536 			if (m == NULL) {
4537 				control->tail_mbuf = prev;
4538 			}
4539 			continue;
4540 		}
4541 		prev = m;
4542 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4543 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4544 		}
4545 		sctp_sballoc(stcb, sb, m);
4546 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4547 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4548 		}
4549 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4550 		m = SCTP_BUF_NEXT(m);
4551 	}
4552 	if (prev != NULL) {
4553 		control->tail_mbuf = prev;
4554 	} else {
4555 		/* Everything got collapsed out?? */
4556 		sctp_free_remote_addr(control->whoFrom);
4557 		sctp_free_a_readq(stcb, control);
4558 		if (inp_read_lock_held == 0)
4559 			SCTP_INP_READ_UNLOCK(inp);
4560 		return;
4561 	}
4562 	if (end) {
4563 		control->end_added = 1;
4564 	}
4565 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4566 	control->on_read_q = 1;
4567 	if (inp_read_lock_held == 0)
4568 		SCTP_INP_READ_UNLOCK(inp);
4569 	if (inp && inp->sctp_socket) {
4570 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
4571 	}
4572 }
4573 
4574 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4575  *************ALTERNATE ROUTING CODE
4576  */
4577 
4578 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4579  *************ALTERNATE ROUTING CODE
4580  */
4581 
4582 struct mbuf *
4583 sctp_generate_cause(uint16_t code, char *info)
4584 {
4585 	struct mbuf *m;
4586 	struct sctp_gen_error_cause *cause;
4587 	size_t info_len;
4588 	uint16_t len;
4589 
4590 	if ((code == 0) || (info == NULL)) {
4591 		return (NULL);
4592 	}
4593 	info_len = strlen(info);
4594 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4595 		return (NULL);
4596 	}
4597 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4598 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4599 	if (m != NULL) {
4600 		SCTP_BUF_LEN(m) = len;
4601 		cause = mtod(m, struct sctp_gen_error_cause *);
4602 		cause->code = htons(code);
4603 		cause->length = htons(len);
4604 		memcpy(cause->info, info, info_len);
4605 	}
4606 	return (m);
4607 }
4608 
4609 struct mbuf *
4610 sctp_generate_no_user_data_cause(uint32_t tsn)
4611 {
4612 	struct mbuf *m;
4613 	struct sctp_error_no_user_data *no_user_data_cause;
4614 	uint16_t len;
4615 
4616 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4617 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4618 	if (m != NULL) {
4619 		SCTP_BUF_LEN(m) = len;
4620 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4621 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4622 		no_user_data_cause->cause.length = htons(len);
4623 		no_user_data_cause->tsn = htonl(tsn);
4624 	}
4625 	return (m);
4626 }
4627 
4628 #ifdef SCTP_MBCNT_LOGGING
4629 void
4630 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4631     struct sctp_tmit_chunk *tp1, int chk_cnt)
4632 {
4633 	if (tp1->data == NULL) {
4634 		return;
4635 	}
4636 	asoc->chunks_on_out_queue -= chk_cnt;
4637 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4638 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4639 		    asoc->total_output_queue_size,
4640 		    tp1->book_size,
4641 		    0,
4642 		    tp1->mbcnt);
4643 	}
4644 	if (asoc->total_output_queue_size >= tp1->book_size) {
4645 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4646 	} else {
4647 		asoc->total_output_queue_size = 0;
4648 	}
4649 
4650 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4651 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4652 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4653 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4654 		} else {
4655 			stcb->sctp_socket->so_snd.sb_cc = 0;
4656 
4657 		}
4658 	}
4659 }
4660 
4661 #endif
4662 
4663 int
4664 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4665     uint8_t sent, int so_locked
4666 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4667     SCTP_UNUSED
4668 #endif
4669 )
4670 {
4671 	struct sctp_stream_out *strq;
4672 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4673 	struct sctp_stream_queue_pending *sp;
4674 	uint32_t mid;
4675 	uint16_t sid;
4676 	uint8_t foundeom = 0;
4677 	int ret_sz = 0;
4678 	int notdone;
4679 	int do_wakeup_routine = 0;
4680 
4681 	sid = tp1->rec.data.sid;
4682 	mid = tp1->rec.data.mid;
4683 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4684 		stcb->asoc.abandoned_sent[0]++;
4685 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4686 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
4687 #if defined(SCTP_DETAILED_STR_STATS)
4688 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4689 #endif
4690 	} else {
4691 		stcb->asoc.abandoned_unsent[0]++;
4692 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4693 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4694 #if defined(SCTP_DETAILED_STR_STATS)
4695 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4696 #endif
4697 	}
4698 	do {
4699 		ret_sz += tp1->book_size;
4700 		if (tp1->data != NULL) {
4701 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4702 				sctp_flight_size_decrease(tp1);
4703 				sctp_total_flight_decrease(stcb, tp1);
4704 			}
4705 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4706 			stcb->asoc.peers_rwnd += tp1->send_size;
4707 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4708 			if (sent) {
4709 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4710 			} else {
4711 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4712 			}
4713 			if (tp1->data) {
4714 				sctp_m_freem(tp1->data);
4715 				tp1->data = NULL;
4716 			}
4717 			do_wakeup_routine = 1;
4718 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4719 				stcb->asoc.sent_queue_cnt_removeable--;
4720 			}
4721 		}
4722 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4723 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4724 		    SCTP_DATA_NOT_FRAG) {
4725 			/* not frag'ed we ae done   */
4726 			notdone = 0;
4727 			foundeom = 1;
4728 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4729 			/* end of frag, we are done */
4730 			notdone = 0;
4731 			foundeom = 1;
4732 		} else {
4733 			/*
4734 			 * Its a begin or middle piece, we must mark all of
4735 			 * it
4736 			 */
4737 			notdone = 1;
4738 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4739 		}
4740 	} while (tp1 && notdone);
4741 	if (foundeom == 0) {
4742 		/*
4743 		 * The multi-part message was scattered across the send and
4744 		 * sent queue.
4745 		 */
4746 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4747 			if ((tp1->rec.data.sid != sid) ||
4748 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4749 				break;
4750 			}
4751 			/*
4752 			 * save to chk in case we have some on stream out
4753 			 * queue. If so and we have an un-transmitted one we
4754 			 * don't have to fudge the TSN.
4755 			 */
4756 			chk = tp1;
4757 			ret_sz += tp1->book_size;
4758 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4759 			if (sent) {
4760 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4761 			} else {
4762 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4763 			}
4764 			if (tp1->data) {
4765 				sctp_m_freem(tp1->data);
4766 				tp1->data = NULL;
4767 			}
4768 			/* No flight involved here book the size to 0 */
4769 			tp1->book_size = 0;
4770 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4771 				foundeom = 1;
4772 			}
4773 			do_wakeup_routine = 1;
4774 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4775 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4776 			/*
4777 			 * on to the sent queue so we can wait for it to be
4778 			 * passed by.
4779 			 */
4780 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4781 			    sctp_next);
4782 			stcb->asoc.send_queue_cnt--;
4783 			stcb->asoc.sent_queue_cnt++;
4784 		}
4785 	}
4786 	if (foundeom == 0) {
4787 		/*
4788 		 * Still no eom found. That means there is stuff left on the
4789 		 * stream out queue.. yuck.
4790 		 */
4791 		SCTP_TCB_SEND_LOCK(stcb);
4792 		strq = &stcb->asoc.strmout[sid];
4793 		sp = TAILQ_FIRST(&strq->outqueue);
4794 		if (sp != NULL) {
4795 			sp->discard_rest = 1;
4796 			/*
4797 			 * We may need to put a chunk on the queue that
4798 			 * holds the TSN that would have been sent with the
4799 			 * LAST bit.
4800 			 */
4801 			if (chk == NULL) {
4802 				/* Yep, we have to */
4803 				sctp_alloc_a_chunk(stcb, chk);
4804 				if (chk == NULL) {
4805 					/*
4806 					 * we are hosed. All we can do is
4807 					 * nothing.. which will cause an
4808 					 * abort if the peer is paying
4809 					 * attention.
4810 					 */
4811 					goto oh_well;
4812 				}
4813 				memset(chk, 0, sizeof(*chk));
4814 				chk->rec.data.rcv_flags = 0;
4815 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4816 				chk->asoc = &stcb->asoc;
4817 				if (stcb->asoc.idata_supported == 0) {
4818 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4819 						chk->rec.data.mid = 0;
4820 					} else {
4821 						chk->rec.data.mid = strq->next_mid_ordered;
4822 					}
4823 				} else {
4824 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4825 						chk->rec.data.mid = strq->next_mid_unordered;
4826 					} else {
4827 						chk->rec.data.mid = strq->next_mid_ordered;
4828 					}
4829 				}
4830 				chk->rec.data.sid = sp->sid;
4831 				chk->rec.data.ppid = sp->ppid;
4832 				chk->rec.data.context = sp->context;
4833 				chk->flags = sp->act_flags;
4834 				chk->whoTo = NULL;
4835 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4836 				strq->chunks_on_queues++;
4837 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4838 				stcb->asoc.sent_queue_cnt++;
4839 				stcb->asoc.pr_sctp_cnt++;
4840 			}
4841 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4842 			if (sp->sinfo_flags & SCTP_UNORDERED) {
4843 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4844 			}
4845 			if (stcb->asoc.idata_supported == 0) {
4846 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4847 					strq->next_mid_ordered++;
4848 				}
4849 			} else {
4850 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4851 					strq->next_mid_unordered++;
4852 				} else {
4853 					strq->next_mid_ordered++;
4854 				}
4855 			}
4856 	oh_well:
4857 			if (sp->data) {
4858 				/*
4859 				 * Pull any data to free up the SB and allow
4860 				 * sender to "add more" while we will throw
4861 				 * away :-)
4862 				 */
4863 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4864 				ret_sz += sp->length;
4865 				do_wakeup_routine = 1;
4866 				sp->some_taken = 1;
4867 				sctp_m_freem(sp->data);
4868 				sp->data = NULL;
4869 				sp->tail_mbuf = NULL;
4870 				sp->length = 0;
4871 			}
4872 		}
4873 		SCTP_TCB_SEND_UNLOCK(stcb);
4874 	}
4875 	if (do_wakeup_routine) {
4876 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4877 		struct socket *so;
4878 
4879 		so = SCTP_INP_SO(stcb->sctp_ep);
4880 		if (!so_locked) {
4881 			atomic_add_int(&stcb->asoc.refcnt, 1);
4882 			SCTP_TCB_UNLOCK(stcb);
4883 			SCTP_SOCKET_LOCK(so, 1);
4884 			SCTP_TCB_LOCK(stcb);
4885 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4886 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4887 				/* assoc was freed while we were unlocked */
4888 				SCTP_SOCKET_UNLOCK(so, 1);
4889 				return (ret_sz);
4890 			}
4891 		}
4892 #endif
4893 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4894 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4895 		if (!so_locked) {
4896 			SCTP_SOCKET_UNLOCK(so, 1);
4897 		}
4898 #endif
4899 	}
4900 	return (ret_sz);
4901 }
4902 
4903 /*
4904  * checks to see if the given address, sa, is one that is currently known by
4905  * the kernel note: can't distinguish the same address on multiple interfaces
4906  * and doesn't handle multiple addresses with different zone/scope id's note:
4907  * ifa_ifwithaddr() compares the entire sockaddr struct
4908  */
4909 struct sctp_ifa *
4910 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4911     int holds_lock)
4912 {
4913 	struct sctp_laddr *laddr;
4914 
4915 	if (holds_lock == 0) {
4916 		SCTP_INP_RLOCK(inp);
4917 	}
4918 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4919 		if (laddr->ifa == NULL)
4920 			continue;
4921 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4922 			continue;
4923 #ifdef INET
4924 		if (addr->sa_family == AF_INET) {
4925 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4926 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4927 				/* found him. */
4928 				if (holds_lock == 0) {
4929 					SCTP_INP_RUNLOCK(inp);
4930 				}
4931 				return (laddr->ifa);
4932 				break;
4933 			}
4934 		}
4935 #endif
4936 #ifdef INET6
4937 		if (addr->sa_family == AF_INET6) {
4938 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4939 			    &laddr->ifa->address.sin6)) {
4940 				/* found him. */
4941 				if (holds_lock == 0) {
4942 					SCTP_INP_RUNLOCK(inp);
4943 				}
4944 				return (laddr->ifa);
4945 				break;
4946 			}
4947 		}
4948 #endif
4949 	}
4950 	if (holds_lock == 0) {
4951 		SCTP_INP_RUNLOCK(inp);
4952 	}
4953 	return (NULL);
4954 }
4955 
4956 uint32_t
4957 sctp_get_ifa_hash_val(struct sockaddr *addr)
4958 {
4959 	switch (addr->sa_family) {
4960 #ifdef INET
4961 	case AF_INET:
4962 		{
4963 			struct sockaddr_in *sin;
4964 
4965 			sin = (struct sockaddr_in *)addr;
4966 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4967 		}
4968 #endif
4969 #ifdef INET6
4970 	case AF_INET6:
4971 		{
4972 			struct sockaddr_in6 *sin6;
4973 			uint32_t hash_of_addr;
4974 
4975 			sin6 = (struct sockaddr_in6 *)addr;
4976 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4977 			    sin6->sin6_addr.s6_addr32[1] +
4978 			    sin6->sin6_addr.s6_addr32[2] +
4979 			    sin6->sin6_addr.s6_addr32[3]);
4980 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4981 			return (hash_of_addr);
4982 		}
4983 #endif
4984 	default:
4985 		break;
4986 	}
4987 	return (0);
4988 }
4989 
4990 struct sctp_ifa *
4991 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4992 {
4993 	struct sctp_ifa *sctp_ifap;
4994 	struct sctp_vrf *vrf;
4995 	struct sctp_ifalist *hash_head;
4996 	uint32_t hash_of_addr;
4997 
4998 	if (holds_lock == 0)
4999 		SCTP_IPI_ADDR_RLOCK();
5000 
5001 	vrf = sctp_find_vrf(vrf_id);
5002 	if (vrf == NULL) {
5003 		if (holds_lock == 0)
5004 			SCTP_IPI_ADDR_RUNLOCK();
5005 		return (NULL);
5006 	}
5007 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5008 
5009 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5010 	if (hash_head == NULL) {
5011 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5012 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5013 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5014 		sctp_print_address(addr);
5015 		SCTP_PRINTF("No such bucket for address\n");
5016 		if (holds_lock == 0)
5017 			SCTP_IPI_ADDR_RUNLOCK();
5018 
5019 		return (NULL);
5020 	}
5021 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5022 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5023 			continue;
5024 #ifdef INET
5025 		if (addr->sa_family == AF_INET) {
5026 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5027 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5028 				/* found him. */
5029 				if (holds_lock == 0)
5030 					SCTP_IPI_ADDR_RUNLOCK();
5031 				return (sctp_ifap);
5032 				break;
5033 			}
5034 		}
5035 #endif
5036 #ifdef INET6
5037 		if (addr->sa_family == AF_INET6) {
5038 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5039 			    &sctp_ifap->address.sin6)) {
5040 				/* found him. */
5041 				if (holds_lock == 0)
5042 					SCTP_IPI_ADDR_RUNLOCK();
5043 				return (sctp_ifap);
5044 				break;
5045 			}
5046 		}
5047 #endif
5048 	}
5049 	if (holds_lock == 0)
5050 		SCTP_IPI_ADDR_RUNLOCK();
5051 	return (NULL);
5052 }
5053 
5054 static void
5055 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
5056     uint32_t rwnd_req)
5057 {
5058 	/* User pulled some data, do we need a rwnd update? */
5059 	int r_unlocked = 0;
5060 	uint32_t dif, rwnd;
5061 	struct socket *so = NULL;
5062 
5063 	if (stcb == NULL)
5064 		return;
5065 
5066 	atomic_add_int(&stcb->asoc.refcnt, 1);
5067 
5068 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5069 	    SCTP_STATE_SHUTDOWN_RECEIVED |
5070 	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5071 		/* Pre-check If we are freeing no update */
5072 		goto no_lock;
5073 	}
5074 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5075 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5076 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5077 		goto out;
5078 	}
5079 	so = stcb->sctp_socket;
5080 	if (so == NULL) {
5081 		goto out;
5082 	}
5083 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5084 	/* Have you have freed enough to look */
5085 	*freed_so_far = 0;
5086 	/* Yep, its worth a look and the lock overhead */
5087 
5088 	/* Figure out what the rwnd would be */
5089 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5090 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5091 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5092 	} else {
5093 		dif = 0;
5094 	}
5095 	if (dif >= rwnd_req) {
5096 		if (hold_rlock) {
5097 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5098 			r_unlocked = 1;
5099 		}
5100 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5101 			/*
5102 			 * One last check before we allow the guy possibly
5103 			 * to get in. There is a race, where the guy has not
5104 			 * reached the gate. In that case
5105 			 */
5106 			goto out;
5107 		}
5108 		SCTP_TCB_LOCK(stcb);
5109 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5110 			/* No reports here */
5111 			SCTP_TCB_UNLOCK(stcb);
5112 			goto out;
5113 		}
5114 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5115 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5116 
5117 		sctp_chunk_output(stcb->sctp_ep, stcb,
5118 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5119 		/* make sure no timer is running */
5120 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5121 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5122 		SCTP_TCB_UNLOCK(stcb);
5123 	} else {
5124 		/* Update how much we have pending */
5125 		stcb->freed_by_sorcv_sincelast = dif;
5126 	}
5127 out:
5128 	if (so && r_unlocked && hold_rlock) {
5129 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5130 	}
5131 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5132 no_lock:
5133 	atomic_add_int(&stcb->asoc.refcnt, -1);
5134 	return;
5135 }
5136 
5137 int
5138 sctp_sorecvmsg(struct socket *so,
5139     struct uio *uio,
5140     struct mbuf **mp,
5141     struct sockaddr *from,
5142     int fromlen,
5143     int *msg_flags,
5144     struct sctp_sndrcvinfo *sinfo,
5145     int filling_sinfo)
5146 {
5147 	/*
5148 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5149 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5150 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5151 	 * On the way out we may send out any combination of:
5152 	 * MSG_NOTIFICATION MSG_EOR
5153 	 *
5154 	 */
5155 	struct sctp_inpcb *inp = NULL;
5156 	int my_len = 0;
5157 	int cp_len = 0, error = 0;
5158 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5159 	struct mbuf *m = NULL;
5160 	struct sctp_tcb *stcb = NULL;
5161 	int wakeup_read_socket = 0;
5162 	int freecnt_applied = 0;
5163 	int out_flags = 0, in_flags = 0;
5164 	int block_allowed = 1;
5165 	uint32_t freed_so_far = 0;
5166 	uint32_t copied_so_far = 0;
5167 	int in_eeor_mode = 0;
5168 	int no_rcv_needed = 0;
5169 	uint32_t rwnd_req = 0;
5170 	int hold_sblock = 0;
5171 	int hold_rlock = 0;
5172 	ssize_t slen = 0;
5173 	uint32_t held_length = 0;
5174 	int sockbuf_lock = 0;
5175 
5176 	if (uio == NULL) {
5177 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5178 		return (EINVAL);
5179 	}
5180 	if (msg_flags) {
5181 		in_flags = *msg_flags;
5182 		if (in_flags & MSG_PEEK)
5183 			SCTP_STAT_INCR(sctps_read_peeks);
5184 	} else {
5185 		in_flags = 0;
5186 	}
5187 	slen = uio->uio_resid;
5188 
5189 	/* Pull in and set up our int flags */
5190 	if (in_flags & MSG_OOB) {
5191 		/* Out of band's NOT supported */
5192 		return (EOPNOTSUPP);
5193 	}
5194 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5195 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5196 		return (EINVAL);
5197 	}
5198 	if ((in_flags & (MSG_DONTWAIT
5199 	    | MSG_NBIO
5200 	    )) ||
5201 	    SCTP_SO_IS_NBIO(so)) {
5202 		block_allowed = 0;
5203 	}
5204 	/* setup the endpoint */
5205 	inp = (struct sctp_inpcb *)so->so_pcb;
5206 	if (inp == NULL) {
5207 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5208 		return (EFAULT);
5209 	}
5210 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5211 	/* Must be at least a MTU's worth */
5212 	if (rwnd_req < SCTP_MIN_RWND)
5213 		rwnd_req = SCTP_MIN_RWND;
5214 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5215 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5216 		sctp_misc_ints(SCTP_SORECV_ENTER,
5217 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5218 	}
5219 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5220 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5221 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5222 	}
5223 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5224 	if (error) {
5225 		goto release_unlocked;
5226 	}
5227 	sockbuf_lock = 1;
5228 restart:
5229 
5230 
5231 restart_nosblocks:
5232 	if (hold_sblock == 0) {
5233 		SOCKBUF_LOCK(&so->so_rcv);
5234 		hold_sblock = 1;
5235 	}
5236 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5237 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5238 		goto out;
5239 	}
5240 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5241 		if (so->so_error) {
5242 			error = so->so_error;
5243 			if ((in_flags & MSG_PEEK) == 0)
5244 				so->so_error = 0;
5245 			goto out;
5246 		} else {
5247 			if (so->so_rcv.sb_cc == 0) {
5248 				/* indicate EOF */
5249 				error = 0;
5250 				goto out;
5251 			}
5252 		}
5253 	}
5254 	if (so->so_rcv.sb_cc <= held_length) {
5255 		if (so->so_error) {
5256 			error = so->so_error;
5257 			if ((in_flags & MSG_PEEK) == 0) {
5258 				so->so_error = 0;
5259 			}
5260 			goto out;
5261 		}
5262 		if ((so->so_rcv.sb_cc == 0) &&
5263 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5264 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5265 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5266 				/*
5267 				 * For active open side clear flags for
5268 				 * re-use passive open is blocked by
5269 				 * connect.
5270 				 */
5271 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5272 					/*
5273 					 * You were aborted, passive side
5274 					 * always hits here
5275 					 */
5276 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5277 					error = ECONNRESET;
5278 				}
5279 				so->so_state &= ~(SS_ISCONNECTING |
5280 				    SS_ISDISCONNECTING |
5281 				    SS_ISCONFIRMING |
5282 				    SS_ISCONNECTED);
5283 				if (error == 0) {
5284 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5285 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5286 						error = ENOTCONN;
5287 					}
5288 				}
5289 				goto out;
5290 			}
5291 		}
5292 		if (block_allowed) {
5293 			error = sbwait(&so->so_rcv);
5294 			if (error) {
5295 				goto out;
5296 			}
5297 			held_length = 0;
5298 			goto restart_nosblocks;
5299 		} else {
5300 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5301 			error = EWOULDBLOCK;
5302 			goto out;
5303 		}
5304 	}
5305 	if (hold_sblock == 1) {
5306 		SOCKBUF_UNLOCK(&so->so_rcv);
5307 		hold_sblock = 0;
5308 	}
5309 	/* we possibly have data we can read */
5310 	/* sa_ignore FREED_MEMORY */
5311 	control = TAILQ_FIRST(&inp->read_queue);
5312 	if (control == NULL) {
5313 		/*
5314 		 * This could be happening since the appender did the
5315 		 * increment but as not yet did the tailq insert onto the
5316 		 * read_queue
5317 		 */
5318 		if (hold_rlock == 0) {
5319 			SCTP_INP_READ_LOCK(inp);
5320 		}
5321 		control = TAILQ_FIRST(&inp->read_queue);
5322 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5323 #ifdef INVARIANTS
5324 			panic("Huh, its non zero and nothing on control?");
5325 #endif
5326 			so->so_rcv.sb_cc = 0;
5327 		}
5328 		SCTP_INP_READ_UNLOCK(inp);
5329 		hold_rlock = 0;
5330 		goto restart;
5331 	}
5332 	if ((control->length == 0) &&
5333 	    (control->do_not_ref_stcb)) {
5334 		/*
5335 		 * Clean up code for freeing assoc that left behind a
5336 		 * pdapi.. maybe a peer in EEOR that just closed after
5337 		 * sending and never indicated a EOR.
5338 		 */
5339 		if (hold_rlock == 0) {
5340 			hold_rlock = 1;
5341 			SCTP_INP_READ_LOCK(inp);
5342 		}
5343 		control->held_length = 0;
5344 		if (control->data) {
5345 			/* Hmm there is data here .. fix */
5346 			struct mbuf *m_tmp;
5347 			int cnt = 0;
5348 
5349 			m_tmp = control->data;
5350 			while (m_tmp) {
5351 				cnt += SCTP_BUF_LEN(m_tmp);
5352 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5353 					control->tail_mbuf = m_tmp;
5354 					control->end_added = 1;
5355 				}
5356 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5357 			}
5358 			control->length = cnt;
5359 		} else {
5360 			/* remove it */
5361 			TAILQ_REMOVE(&inp->read_queue, control, next);
5362 			/* Add back any hiddend data */
5363 			sctp_free_remote_addr(control->whoFrom);
5364 			sctp_free_a_readq(stcb, control);
5365 		}
5366 		if (hold_rlock) {
5367 			hold_rlock = 0;
5368 			SCTP_INP_READ_UNLOCK(inp);
5369 		}
5370 		goto restart;
5371 	}
5372 	if ((control->length == 0) &&
5373 	    (control->end_added == 1)) {
5374 		/*
5375 		 * Do we also need to check for (control->pdapi_aborted ==
5376 		 * 1)?
5377 		 */
5378 		if (hold_rlock == 0) {
5379 			hold_rlock = 1;
5380 			SCTP_INP_READ_LOCK(inp);
5381 		}
5382 		TAILQ_REMOVE(&inp->read_queue, control, next);
5383 		if (control->data) {
5384 #ifdef INVARIANTS
5385 			panic("control->data not null but control->length == 0");
5386 #else
5387 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5388 			sctp_m_freem(control->data);
5389 			control->data = NULL;
5390 #endif
5391 		}
5392 		if (control->aux_data) {
5393 			sctp_m_free(control->aux_data);
5394 			control->aux_data = NULL;
5395 		}
5396 #ifdef INVARIANTS
5397 		if (control->on_strm_q) {
5398 			panic("About to free ctl:%p so:%p and its in %d",
5399 			    control, so, control->on_strm_q);
5400 		}
5401 #endif
5402 		sctp_free_remote_addr(control->whoFrom);
5403 		sctp_free_a_readq(stcb, control);
5404 		if (hold_rlock) {
5405 			hold_rlock = 0;
5406 			SCTP_INP_READ_UNLOCK(inp);
5407 		}
5408 		goto restart;
5409 	}
5410 	if (control->length == 0) {
5411 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5412 		    (filling_sinfo)) {
5413 			/* find a more suitable one then this */
5414 			ctl = TAILQ_NEXT(control, next);
5415 			while (ctl) {
5416 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5417 				    (ctl->some_taken ||
5418 				    (ctl->spec_flags & M_NOTIFICATION) ||
5419 				    ((ctl->do_not_ref_stcb == 0) &&
5420 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5421 				    ) {
5422 					/*-
5423 					 * If we have a different TCB next, and there is data
5424 					 * present. If we have already taken some (pdapi), OR we can
5425 					 * ref the tcb and no delivery as started on this stream, we
5426 					 * take it. Note we allow a notification on a different
5427 					 * assoc to be delivered..
5428 					 */
5429 					control = ctl;
5430 					goto found_one;
5431 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5432 					    (ctl->length) &&
5433 					    ((ctl->some_taken) ||
5434 					    ((ctl->do_not_ref_stcb == 0) &&
5435 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5436 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5437 					/*-
5438 					 * If we have the same tcb, and there is data present, and we
5439 					 * have the strm interleave feature present. Then if we have
5440 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5441 					 * not started a delivery for this stream, we can take it.
5442 					 * Note we do NOT allow a notificaiton on the same assoc to
5443 					 * be delivered.
5444 					 */
5445 					control = ctl;
5446 					goto found_one;
5447 				}
5448 				ctl = TAILQ_NEXT(ctl, next);
5449 			}
5450 		}
5451 		/*
5452 		 * if we reach here, not suitable replacement is available
5453 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5454 		 * into the our held count, and its time to sleep again.
5455 		 */
5456 		held_length = so->so_rcv.sb_cc;
5457 		control->held_length = so->so_rcv.sb_cc;
5458 		goto restart;
5459 	}
5460 	/* Clear the held length since there is something to read */
5461 	control->held_length = 0;
5462 found_one:
5463 	/*
5464 	 * If we reach here, control has a some data for us to read off.
5465 	 * Note that stcb COULD be NULL.
5466 	 */
5467 	if (hold_rlock == 0) {
5468 		hold_rlock = 1;
5469 		SCTP_INP_READ_LOCK(inp);
5470 	}
5471 	control->some_taken++;
5472 	stcb = control->stcb;
5473 	if (stcb) {
5474 		if ((control->do_not_ref_stcb == 0) &&
5475 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5476 			if (freecnt_applied == 0)
5477 				stcb = NULL;
5478 		} else if (control->do_not_ref_stcb == 0) {
5479 			/* you can't free it on me please */
5480 			/*
5481 			 * The lock on the socket buffer protects us so the
5482 			 * free code will stop. But since we used the
5483 			 * socketbuf lock and the sender uses the tcb_lock
5484 			 * to increment, we need to use the atomic add to
5485 			 * the refcnt
5486 			 */
5487 			if (freecnt_applied) {
5488 #ifdef INVARIANTS
5489 				panic("refcnt already incremented");
5490 #else
5491 				SCTP_PRINTF("refcnt already incremented?\n");
5492 #endif
5493 			} else {
5494 				atomic_add_int(&stcb->asoc.refcnt, 1);
5495 				freecnt_applied = 1;
5496 			}
5497 			/*
5498 			 * Setup to remember how much we have not yet told
5499 			 * the peer our rwnd has opened up. Note we grab the
5500 			 * value from the tcb from last time. Note too that
5501 			 * sack sending clears this when a sack is sent,
5502 			 * which is fine. Once we hit the rwnd_req, we then
5503 			 * will go to the sctp_user_rcvd() that will not
5504 			 * lock until it KNOWs it MUST send a WUP-SACK.
5505 			 */
5506 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5507 			stcb->freed_by_sorcv_sincelast = 0;
5508 		}
5509 	}
5510 	if (stcb &&
5511 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5512 	    control->do_not_ref_stcb == 0) {
5513 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5514 	}
5515 	/* First lets get off the sinfo and sockaddr info */
5516 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5517 		sinfo->sinfo_stream = control->sinfo_stream;
5518 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5519 		sinfo->sinfo_flags = control->sinfo_flags;
5520 		sinfo->sinfo_ppid = control->sinfo_ppid;
5521 		sinfo->sinfo_context = control->sinfo_context;
5522 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5523 		sinfo->sinfo_tsn = control->sinfo_tsn;
5524 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5525 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5526 		nxt = TAILQ_NEXT(control, next);
5527 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5528 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5529 			struct sctp_extrcvinfo *s_extra;
5530 
5531 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5532 			if ((nxt) &&
5533 			    (nxt->length)) {
5534 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5535 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5536 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5537 				}
5538 				if (nxt->spec_flags & M_NOTIFICATION) {
5539 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5540 				}
5541 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5542 				s_extra->serinfo_next_length = nxt->length;
5543 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5544 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5545 				if (nxt->tail_mbuf != NULL) {
5546 					if (nxt->end_added) {
5547 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5548 					}
5549 				}
5550 			} else {
5551 				/*
5552 				 * we explicitly 0 this, since the memcpy
5553 				 * got some other things beyond the older
5554 				 * sinfo_ that is on the control's structure
5555 				 * :-D
5556 				 */
5557 				nxt = NULL;
5558 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5559 				s_extra->serinfo_next_aid = 0;
5560 				s_extra->serinfo_next_length = 0;
5561 				s_extra->serinfo_next_ppid = 0;
5562 				s_extra->serinfo_next_stream = 0;
5563 			}
5564 		}
5565 		/*
5566 		 * update off the real current cum-ack, if we have an stcb.
5567 		 */
5568 		if ((control->do_not_ref_stcb == 0) && stcb)
5569 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5570 		/*
5571 		 * mask off the high bits, we keep the actual chunk bits in
5572 		 * there.
5573 		 */
5574 		sinfo->sinfo_flags &= 0x00ff;
5575 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5576 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5577 		}
5578 	}
5579 #ifdef SCTP_ASOCLOG_OF_TSNS
5580 	{
5581 		int index, newindex;
5582 		struct sctp_pcbtsn_rlog *entry;
5583 
5584 		do {
5585 			index = inp->readlog_index;
5586 			newindex = index + 1;
5587 			if (newindex >= SCTP_READ_LOG_SIZE) {
5588 				newindex = 0;
5589 			}
5590 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5591 		entry = &inp->readlog[index];
5592 		entry->vtag = control->sinfo_assoc_id;
5593 		entry->strm = control->sinfo_stream;
5594 		entry->seq = (uint16_t)control->mid;
5595 		entry->sz = control->length;
5596 		entry->flgs = control->sinfo_flags;
5597 	}
5598 #endif
5599 	if ((fromlen > 0) && (from != NULL)) {
5600 		union sctp_sockstore store;
5601 		size_t len;
5602 
5603 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5604 #ifdef INET6
5605 		case AF_INET6:
5606 			len = sizeof(struct sockaddr_in6);
5607 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5608 			store.sin6.sin6_port = control->port_from;
5609 			break;
5610 #endif
5611 #ifdef INET
5612 		case AF_INET:
5613 #ifdef INET6
5614 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5615 				len = sizeof(struct sockaddr_in6);
5616 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5617 				    &store.sin6);
5618 				store.sin6.sin6_port = control->port_from;
5619 			} else {
5620 				len = sizeof(struct sockaddr_in);
5621 				store.sin = control->whoFrom->ro._l_addr.sin;
5622 				store.sin.sin_port = control->port_from;
5623 			}
5624 #else
5625 			len = sizeof(struct sockaddr_in);
5626 			store.sin = control->whoFrom->ro._l_addr.sin;
5627 			store.sin.sin_port = control->port_from;
5628 #endif
5629 			break;
5630 #endif
5631 		default:
5632 			len = 0;
5633 			break;
5634 		}
5635 		memcpy(from, &store, min((size_t)fromlen, len));
5636 #ifdef INET6
5637 		{
5638 			struct sockaddr_in6 lsa6, *from6;
5639 
5640 			from6 = (struct sockaddr_in6 *)from;
5641 			sctp_recover_scope_mac(from6, (&lsa6));
5642 		}
5643 #endif
5644 	}
5645 	if (hold_rlock) {
5646 		SCTP_INP_READ_UNLOCK(inp);
5647 		hold_rlock = 0;
5648 	}
5649 	if (hold_sblock) {
5650 		SOCKBUF_UNLOCK(&so->so_rcv);
5651 		hold_sblock = 0;
5652 	}
5653 	/* now copy out what data we can */
5654 	if (mp == NULL) {
5655 		/* copy out each mbuf in the chain up to length */
5656 get_more_data:
5657 		m = control->data;
5658 		while (m) {
5659 			/* Move out all we can */
5660 			cp_len = (int)uio->uio_resid;
5661 			my_len = (int)SCTP_BUF_LEN(m);
5662 			if (cp_len > my_len) {
5663 				/* not enough in this buf */
5664 				cp_len = my_len;
5665 			}
5666 			if (hold_rlock) {
5667 				SCTP_INP_READ_UNLOCK(inp);
5668 				hold_rlock = 0;
5669 			}
5670 			if (cp_len > 0)
5671 				error = uiomove(mtod(m, char *), cp_len, uio);
5672 			/* re-read */
5673 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5674 				goto release;
5675 			}
5676 			if ((control->do_not_ref_stcb == 0) && stcb &&
5677 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5678 				no_rcv_needed = 1;
5679 			}
5680 			if (error) {
5681 				/* error we are out of here */
5682 				goto release;
5683 			}
5684 			SCTP_INP_READ_LOCK(inp);
5685 			hold_rlock = 1;
5686 			if (cp_len == SCTP_BUF_LEN(m)) {
5687 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5688 				    (control->end_added)) {
5689 					out_flags |= MSG_EOR;
5690 					if ((control->do_not_ref_stcb == 0) &&
5691 					    (control->stcb != NULL) &&
5692 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5693 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5694 				}
5695 				if (control->spec_flags & M_NOTIFICATION) {
5696 					out_flags |= MSG_NOTIFICATION;
5697 				}
5698 				/* we ate up the mbuf */
5699 				if (in_flags & MSG_PEEK) {
5700 					/* just looking */
5701 					m = SCTP_BUF_NEXT(m);
5702 					copied_so_far += cp_len;
5703 				} else {
5704 					/* dispose of the mbuf */
5705 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5706 						sctp_sblog(&so->so_rcv,
5707 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5708 					}
5709 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5710 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5711 						sctp_sblog(&so->so_rcv,
5712 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5713 					}
5714 					copied_so_far += cp_len;
5715 					freed_so_far += cp_len;
5716 					freed_so_far += MSIZE;
5717 					atomic_subtract_int(&control->length, cp_len);
5718 					control->data = sctp_m_free(m);
5719 					m = control->data;
5720 					/*
5721 					 * been through it all, must hold sb
5722 					 * lock ok to null tail
5723 					 */
5724 					if (control->data == NULL) {
5725 #ifdef INVARIANTS
5726 						if ((control->end_added == 0) ||
5727 						    (TAILQ_NEXT(control, next) == NULL)) {
5728 							/*
5729 							 * If the end is not
5730 							 * added, OR the
5731 							 * next is NOT null
5732 							 * we MUST have the
5733 							 * lock.
5734 							 */
5735 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5736 								panic("Hmm we don't own the lock?");
5737 							}
5738 						}
5739 #endif
5740 						control->tail_mbuf = NULL;
5741 #ifdef INVARIANTS
5742 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5743 							panic("end_added, nothing left and no MSG_EOR");
5744 						}
5745 #endif
5746 					}
5747 				}
5748 			} else {
5749 				/* Do we need to trim the mbuf? */
5750 				if (control->spec_flags & M_NOTIFICATION) {
5751 					out_flags |= MSG_NOTIFICATION;
5752 				}
5753 				if ((in_flags & MSG_PEEK) == 0) {
5754 					SCTP_BUF_RESV_UF(m, cp_len);
5755 					SCTP_BUF_LEN(m) -= cp_len;
5756 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5757 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5758 					}
5759 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5760 					if ((control->do_not_ref_stcb == 0) &&
5761 					    stcb) {
5762 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5763 					}
5764 					copied_so_far += cp_len;
5765 					freed_so_far += cp_len;
5766 					freed_so_far += MSIZE;
5767 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5768 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5769 						    SCTP_LOG_SBRESULT, 0);
5770 					}
5771 					atomic_subtract_int(&control->length, cp_len);
5772 				} else {
5773 					copied_so_far += cp_len;
5774 				}
5775 			}
5776 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5777 				break;
5778 			}
5779 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5780 			    (control->do_not_ref_stcb == 0) &&
5781 			    (freed_so_far >= rwnd_req)) {
5782 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5783 			}
5784 		}		/* end while(m) */
5785 		/*
5786 		 * At this point we have looked at it all and we either have
5787 		 * a MSG_EOR/or read all the user wants... <OR>
5788 		 * control->length == 0.
5789 		 */
5790 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5791 			/* we are done with this control */
5792 			if (control->length == 0) {
5793 				if (control->data) {
5794 #ifdef INVARIANTS
5795 					panic("control->data not null at read eor?");
5796 #else
5797 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5798 					sctp_m_freem(control->data);
5799 					control->data = NULL;
5800 #endif
5801 				}
5802 		done_with_control:
5803 				if (hold_rlock == 0) {
5804 					SCTP_INP_READ_LOCK(inp);
5805 					hold_rlock = 1;
5806 				}
5807 				TAILQ_REMOVE(&inp->read_queue, control, next);
5808 				/* Add back any hiddend data */
5809 				if (control->held_length) {
5810 					held_length = 0;
5811 					control->held_length = 0;
5812 					wakeup_read_socket = 1;
5813 				}
5814 				if (control->aux_data) {
5815 					sctp_m_free(control->aux_data);
5816 					control->aux_data = NULL;
5817 				}
5818 				no_rcv_needed = control->do_not_ref_stcb;
5819 				sctp_free_remote_addr(control->whoFrom);
5820 				control->data = NULL;
5821 #ifdef INVARIANTS
5822 				if (control->on_strm_q) {
5823 					panic("About to free ctl:%p so:%p and its in %d",
5824 					    control, so, control->on_strm_q);
5825 				}
5826 #endif
5827 				sctp_free_a_readq(stcb, control);
5828 				control = NULL;
5829 				if ((freed_so_far >= rwnd_req) &&
5830 				    (no_rcv_needed == 0))
5831 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5832 
5833 			} else {
5834 				/*
5835 				 * The user did not read all of this
5836 				 * message, turn off the returned MSG_EOR
5837 				 * since we are leaving more behind on the
5838 				 * control to read.
5839 				 */
5840 #ifdef INVARIANTS
5841 				if (control->end_added &&
5842 				    (control->data == NULL) &&
5843 				    (control->tail_mbuf == NULL)) {
5844 					panic("Gak, control->length is corrupt?");
5845 				}
5846 #endif
5847 				no_rcv_needed = control->do_not_ref_stcb;
5848 				out_flags &= ~MSG_EOR;
5849 			}
5850 		}
5851 		if (out_flags & MSG_EOR) {
5852 			goto release;
5853 		}
5854 		if ((uio->uio_resid == 0) ||
5855 		    ((in_eeor_mode) &&
5856 		    (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
5857 			goto release;
5858 		}
5859 		/*
5860 		 * If I hit here the receiver wants more and this message is
5861 		 * NOT done (pd-api). So two questions. Can we block? if not
5862 		 * we are done. Did the user NOT set MSG_WAITALL?
5863 		 */
5864 		if (block_allowed == 0) {
5865 			goto release;
5866 		}
5867 		/*
5868 		 * We need to wait for more data a few things: - We don't
5869 		 * sbunlock() so we don't get someone else reading. - We
5870 		 * must be sure to account for the case where what is added
5871 		 * is NOT to our control when we wakeup.
5872 		 */
5873 
5874 		/*
5875 		 * Do we need to tell the transport a rwnd update might be
5876 		 * needed before we go to sleep?
5877 		 */
5878 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5879 		    ((freed_so_far >= rwnd_req) &&
5880 		    (control->do_not_ref_stcb == 0) &&
5881 		    (no_rcv_needed == 0))) {
5882 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5883 		}
5884 wait_some_more:
5885 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5886 			goto release;
5887 		}
5888 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5889 			goto release;
5890 
5891 		if (hold_rlock == 1) {
5892 			SCTP_INP_READ_UNLOCK(inp);
5893 			hold_rlock = 0;
5894 		}
5895 		if (hold_sblock == 0) {
5896 			SOCKBUF_LOCK(&so->so_rcv);
5897 			hold_sblock = 1;
5898 		}
5899 		if ((copied_so_far) && (control->length == 0) &&
5900 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5901 			goto release;
5902 		}
5903 		if (so->so_rcv.sb_cc <= control->held_length) {
5904 			error = sbwait(&so->so_rcv);
5905 			if (error) {
5906 				goto release;
5907 			}
5908 			control->held_length = 0;
5909 		}
5910 		if (hold_sblock) {
5911 			SOCKBUF_UNLOCK(&so->so_rcv);
5912 			hold_sblock = 0;
5913 		}
5914 		if (control->length == 0) {
5915 			/* still nothing here */
5916 			if (control->end_added == 1) {
5917 				/* he aborted, or is done i.e.did a shutdown */
5918 				out_flags |= MSG_EOR;
5919 				if (control->pdapi_aborted) {
5920 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5921 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5922 
5923 					out_flags |= MSG_TRUNC;
5924 				} else {
5925 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5926 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5927 				}
5928 				goto done_with_control;
5929 			}
5930 			if (so->so_rcv.sb_cc > held_length) {
5931 				control->held_length = so->so_rcv.sb_cc;
5932 				held_length = 0;
5933 			}
5934 			goto wait_some_more;
5935 		} else if (control->data == NULL) {
5936 			/*
5937 			 * we must re-sync since data is probably being
5938 			 * added
5939 			 */
5940 			SCTP_INP_READ_LOCK(inp);
5941 			if ((control->length > 0) && (control->data == NULL)) {
5942 				/*
5943 				 * big trouble.. we have the lock and its
5944 				 * corrupt?
5945 				 */
5946 #ifdef INVARIANTS
5947 				panic("Impossible data==NULL length !=0");
5948 #endif
5949 				out_flags |= MSG_EOR;
5950 				out_flags |= MSG_TRUNC;
5951 				control->length = 0;
5952 				SCTP_INP_READ_UNLOCK(inp);
5953 				goto done_with_control;
5954 			}
5955 			SCTP_INP_READ_UNLOCK(inp);
5956 			/* We will fall around to get more data */
5957 		}
5958 		goto get_more_data;
5959 	} else {
5960 		/*-
5961 		 * Give caller back the mbuf chain,
5962 		 * store in uio_resid the length
5963 		 */
5964 		wakeup_read_socket = 0;
5965 		if ((control->end_added == 0) ||
5966 		    (TAILQ_NEXT(control, next) == NULL)) {
5967 			/* Need to get rlock */
5968 			if (hold_rlock == 0) {
5969 				SCTP_INP_READ_LOCK(inp);
5970 				hold_rlock = 1;
5971 			}
5972 		}
5973 		if (control->end_added) {
5974 			out_flags |= MSG_EOR;
5975 			if ((control->do_not_ref_stcb == 0) &&
5976 			    (control->stcb != NULL) &&
5977 			    ((control->spec_flags & M_NOTIFICATION) == 0))
5978 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5979 		}
5980 		if (control->spec_flags & M_NOTIFICATION) {
5981 			out_flags |= MSG_NOTIFICATION;
5982 		}
5983 		uio->uio_resid = control->length;
5984 		*mp = control->data;
5985 		m = control->data;
5986 		while (m) {
5987 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5988 				sctp_sblog(&so->so_rcv,
5989 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5990 			}
5991 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5992 			freed_so_far += SCTP_BUF_LEN(m);
5993 			freed_so_far += MSIZE;
5994 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5995 				sctp_sblog(&so->so_rcv,
5996 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5997 			}
5998 			m = SCTP_BUF_NEXT(m);
5999 		}
6000 		control->data = control->tail_mbuf = NULL;
6001 		control->length = 0;
6002 		if (out_flags & MSG_EOR) {
6003 			/* Done with this control */
6004 			goto done_with_control;
6005 		}
6006 	}
6007 release:
6008 	if (hold_rlock == 1) {
6009 		SCTP_INP_READ_UNLOCK(inp);
6010 		hold_rlock = 0;
6011 	}
6012 	if (hold_sblock == 1) {
6013 		SOCKBUF_UNLOCK(&so->so_rcv);
6014 		hold_sblock = 0;
6015 	}
6016 	sbunlock(&so->so_rcv);
6017 	sockbuf_lock = 0;
6018 
6019 release_unlocked:
6020 	if (hold_sblock) {
6021 		SOCKBUF_UNLOCK(&so->so_rcv);
6022 		hold_sblock = 0;
6023 	}
6024 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6025 		if ((freed_so_far >= rwnd_req) &&
6026 		    (control && (control->do_not_ref_stcb == 0)) &&
6027 		    (no_rcv_needed == 0))
6028 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6029 	}
6030 out:
6031 	if (msg_flags) {
6032 		*msg_flags = out_flags;
6033 	}
6034 	if (((out_flags & MSG_EOR) == 0) &&
6035 	    ((in_flags & MSG_PEEK) == 0) &&
6036 	    (sinfo) &&
6037 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6038 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6039 		struct sctp_extrcvinfo *s_extra;
6040 
6041 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6042 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6043 	}
6044 	if (hold_rlock == 1) {
6045 		SCTP_INP_READ_UNLOCK(inp);
6046 	}
6047 	if (hold_sblock) {
6048 		SOCKBUF_UNLOCK(&so->so_rcv);
6049 	}
6050 	if (sockbuf_lock) {
6051 		sbunlock(&so->so_rcv);
6052 	}
6053 	if (freecnt_applied) {
6054 		/*
6055 		 * The lock on the socket buffer protects us so the free
6056 		 * code will stop. But since we used the socketbuf lock and
6057 		 * the sender uses the tcb_lock to increment, we need to use
6058 		 * the atomic add to the refcnt.
6059 		 */
6060 		if (stcb == NULL) {
6061 #ifdef INVARIANTS
6062 			panic("stcb for refcnt has gone NULL?");
6063 			goto stage_left;
6064 #else
6065 			goto stage_left;
6066 #endif
6067 		}
6068 		/* Save the value back for next time */
6069 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6070 		atomic_add_int(&stcb->asoc.refcnt, -1);
6071 	}
6072 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6073 		if (stcb) {
6074 			sctp_misc_ints(SCTP_SORECV_DONE,
6075 			    freed_so_far,
6076 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6077 			    stcb->asoc.my_rwnd,
6078 			    so->so_rcv.sb_cc);
6079 		} else {
6080 			sctp_misc_ints(SCTP_SORECV_DONE,
6081 			    freed_so_far,
6082 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6083 			    0,
6084 			    so->so_rcv.sb_cc);
6085 		}
6086 	}
6087 stage_left:
6088 	if (wakeup_read_socket) {
6089 		sctp_sorwakeup(inp, so);
6090 	}
6091 	return (error);
6092 }
6093 
6094 
6095 #ifdef SCTP_MBUF_LOGGING
6096 struct mbuf *
6097 sctp_m_free(struct mbuf *m)
6098 {
6099 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6100 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6101 	}
6102 	return (m_free(m));
6103 }
6104 
6105 void
6106 sctp_m_freem(struct mbuf *mb)
6107 {
6108 	while (mb != NULL)
6109 		mb = sctp_m_free(mb);
6110 }
6111 
6112 #endif
6113 
6114 int
6115 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6116 {
6117 	/*
6118 	 * Given a local address. For all associations that holds the
6119 	 * address, request a peer-set-primary.
6120 	 */
6121 	struct sctp_ifa *ifa;
6122 	struct sctp_laddr *wi;
6123 
6124 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6125 	if (ifa == NULL) {
6126 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6127 		return (EADDRNOTAVAIL);
6128 	}
6129 	/*
6130 	 * Now that we have the ifa we must awaken the iterator with this
6131 	 * message.
6132 	 */
6133 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6134 	if (wi == NULL) {
6135 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6136 		return (ENOMEM);
6137 	}
6138 	/* Now incr the count and int wi structure */
6139 	SCTP_INCR_LADDR_COUNT();
6140 	memset(wi, 0, sizeof(*wi));
6141 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6142 	wi->ifa = ifa;
6143 	wi->action = SCTP_SET_PRIM_ADDR;
6144 	atomic_add_int(&ifa->refcount, 1);
6145 
6146 	/* Now add it to the work queue */
6147 	SCTP_WQ_ADDR_LOCK();
6148 	/*
6149 	 * Should this really be a tailq? As it is we will process the
6150 	 * newest first :-0
6151 	 */
6152 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6153 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6154 	    (struct sctp_inpcb *)NULL,
6155 	    (struct sctp_tcb *)NULL,
6156 	    (struct sctp_nets *)NULL);
6157 	SCTP_WQ_ADDR_UNLOCK();
6158 	return (0);
6159 }
6160 
6161 
6162 int
6163 sctp_soreceive(struct socket *so,
6164     struct sockaddr **psa,
6165     struct uio *uio,
6166     struct mbuf **mp0,
6167     struct mbuf **controlp,
6168     int *flagsp)
6169 {
6170 	int error, fromlen;
6171 	uint8_t sockbuf[256];
6172 	struct sockaddr *from;
6173 	struct sctp_extrcvinfo sinfo;
6174 	int filling_sinfo = 1;
6175 	struct sctp_inpcb *inp;
6176 
6177 	inp = (struct sctp_inpcb *)so->so_pcb;
6178 	/* pickup the assoc we are reading from */
6179 	if (inp == NULL) {
6180 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6181 		return (EINVAL);
6182 	}
6183 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6184 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6185 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6186 	    (controlp == NULL)) {
6187 		/* user does not want the sndrcv ctl */
6188 		filling_sinfo = 0;
6189 	}
6190 	if (psa) {
6191 		from = (struct sockaddr *)sockbuf;
6192 		fromlen = sizeof(sockbuf);
6193 		from->sa_len = 0;
6194 	} else {
6195 		from = NULL;
6196 		fromlen = 0;
6197 	}
6198 
6199 	if (filling_sinfo) {
6200 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6201 	}
6202 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6203 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6204 	if (controlp != NULL) {
6205 		/* copy back the sinfo in a CMSG format */
6206 		if (filling_sinfo)
6207 			*controlp = sctp_build_ctl_nchunk(inp,
6208 			    (struct sctp_sndrcvinfo *)&sinfo);
6209 		else
6210 			*controlp = NULL;
6211 	}
6212 	if (psa) {
6213 		/* copy back the address info */
6214 		if (from && from->sa_len) {
6215 			*psa = sodupsockaddr(from, M_NOWAIT);
6216 		} else {
6217 			*psa = NULL;
6218 		}
6219 	}
6220 	return (error);
6221 }
6222 
6223 
6224 
6225 
6226 
6227 int
6228 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6229     int totaddr, int *error)
6230 {
6231 	int added = 0;
6232 	int i;
6233 	struct sctp_inpcb *inp;
6234 	struct sockaddr *sa;
6235 	size_t incr = 0;
6236 #ifdef INET
6237 	struct sockaddr_in *sin;
6238 #endif
6239 #ifdef INET6
6240 	struct sockaddr_in6 *sin6;
6241 #endif
6242 
6243 	sa = addr;
6244 	inp = stcb->sctp_ep;
6245 	*error = 0;
6246 	for (i = 0; i < totaddr; i++) {
6247 		switch (sa->sa_family) {
6248 #ifdef INET
6249 		case AF_INET:
6250 			incr = sizeof(struct sockaddr_in);
6251 			sin = (struct sockaddr_in *)sa;
6252 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6253 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6254 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6255 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6256 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6257 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6258 				*error = EINVAL;
6259 				goto out_now;
6260 			}
6261 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6262 			    SCTP_DONOT_SETSCOPE,
6263 			    SCTP_ADDR_IS_CONFIRMED)) {
6264 				/* assoc gone no un-lock */
6265 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6266 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6267 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6268 				*error = ENOBUFS;
6269 				goto out_now;
6270 			}
6271 			added++;
6272 			break;
6273 #endif
6274 #ifdef INET6
6275 		case AF_INET6:
6276 			incr = sizeof(struct sockaddr_in6);
6277 			sin6 = (struct sockaddr_in6 *)sa;
6278 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6279 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6280 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6281 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6282 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6283 				*error = EINVAL;
6284 				goto out_now;
6285 			}
6286 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6287 			    SCTP_DONOT_SETSCOPE,
6288 			    SCTP_ADDR_IS_CONFIRMED)) {
6289 				/* assoc gone no un-lock */
6290 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6291 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6292 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6293 				*error = ENOBUFS;
6294 				goto out_now;
6295 			}
6296 			added++;
6297 			break;
6298 #endif
6299 		default:
6300 			break;
6301 		}
6302 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6303 	}
6304 out_now:
6305 	return (added);
6306 }
6307 
6308 struct sctp_tcb *
6309 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6310     unsigned int *totaddr,
6311     unsigned int *num_v4, unsigned int *num_v6, int *error,
6312     unsigned int limit, int *bad_addr)
6313 {
6314 	struct sockaddr *sa;
6315 	struct sctp_tcb *stcb = NULL;
6316 	unsigned int incr, at, i;
6317 
6318 	at = 0;
6319 	sa = addr;
6320 	*error = *num_v6 = *num_v4 = 0;
6321 	/* account and validate addresses */
6322 	for (i = 0; i < *totaddr; i++) {
6323 		switch (sa->sa_family) {
6324 #ifdef INET
6325 		case AF_INET:
6326 			incr = (unsigned int)sizeof(struct sockaddr_in);
6327 			if (sa->sa_len != incr) {
6328 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6329 				*error = EINVAL;
6330 				*bad_addr = 1;
6331 				return (NULL);
6332 			}
6333 			(*num_v4) += 1;
6334 			break;
6335 #endif
6336 #ifdef INET6
6337 		case AF_INET6:
6338 			{
6339 				struct sockaddr_in6 *sin6;
6340 
6341 				sin6 = (struct sockaddr_in6 *)sa;
6342 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6343 					/* Must be non-mapped for connectx */
6344 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6345 					*error = EINVAL;
6346 					*bad_addr = 1;
6347 					return (NULL);
6348 				}
6349 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6350 				if (sa->sa_len != incr) {
6351 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6352 					*error = EINVAL;
6353 					*bad_addr = 1;
6354 					return (NULL);
6355 				}
6356 				(*num_v6) += 1;
6357 				break;
6358 			}
6359 #endif
6360 		default:
6361 			*totaddr = i;
6362 			incr = 0;
6363 			/* we are done */
6364 			break;
6365 		}
6366 		if (i == *totaddr) {
6367 			break;
6368 		}
6369 		SCTP_INP_INCR_REF(inp);
6370 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6371 		if (stcb != NULL) {
6372 			/* Already have or am bring up an association */
6373 			return (stcb);
6374 		} else {
6375 			SCTP_INP_DECR_REF(inp);
6376 		}
6377 		if ((at + incr) > limit) {
6378 			*totaddr = i;
6379 			break;
6380 		}
6381 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6382 	}
6383 	return ((struct sctp_tcb *)NULL);
6384 }
6385 
6386 /*
6387  * sctp_bindx(ADD) for one address.
6388  * assumes all arguments are valid/checked by caller.
6389  */
6390 void
6391 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6392     struct sockaddr *sa, sctp_assoc_t assoc_id,
6393     uint32_t vrf_id, int *error, void *p)
6394 {
6395 	struct sockaddr *addr_touse;
6396 #if defined(INET) && defined(INET6)
6397 	struct sockaddr_in sin;
6398 #endif
6399 
6400 	/* see if we're bound all already! */
6401 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6402 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6403 		*error = EINVAL;
6404 		return;
6405 	}
6406 	addr_touse = sa;
6407 #ifdef INET6
6408 	if (sa->sa_family == AF_INET6) {
6409 #ifdef INET
6410 		struct sockaddr_in6 *sin6;
6411 
6412 #endif
6413 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6414 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6415 			*error = EINVAL;
6416 			return;
6417 		}
6418 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6419 			/* can only bind v6 on PF_INET6 sockets */
6420 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6421 			*error = EINVAL;
6422 			return;
6423 		}
6424 #ifdef INET
6425 		sin6 = (struct sockaddr_in6 *)addr_touse;
6426 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6427 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6428 			    SCTP_IPV6_V6ONLY(inp)) {
6429 				/* can't bind v4-mapped on PF_INET sockets */
6430 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6431 				*error = EINVAL;
6432 				return;
6433 			}
6434 			in6_sin6_2_sin(&sin, sin6);
6435 			addr_touse = (struct sockaddr *)&sin;
6436 		}
6437 #endif
6438 	}
6439 #endif
6440 #ifdef INET
6441 	if (sa->sa_family == AF_INET) {
6442 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6443 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6444 			*error = EINVAL;
6445 			return;
6446 		}
6447 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6448 		    SCTP_IPV6_V6ONLY(inp)) {
6449 			/* can't bind v4 on PF_INET sockets */
6450 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6451 			*error = EINVAL;
6452 			return;
6453 		}
6454 	}
6455 #endif
6456 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6457 		if (p == NULL) {
6458 			/* Can't get proc for Net/Open BSD */
6459 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6460 			*error = EINVAL;
6461 			return;
6462 		}
6463 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6464 		return;
6465 	}
6466 	/*
6467 	 * No locks required here since bind and mgmt_ep_sa all do their own
6468 	 * locking. If we do something for the FIX: below we may need to
6469 	 * lock in that case.
6470 	 */
6471 	if (assoc_id == 0) {
6472 		/* add the address */
6473 		struct sctp_inpcb *lep;
6474 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6475 
6476 		/* validate the incoming port */
6477 		if ((lsin->sin_port != 0) &&
6478 		    (lsin->sin_port != inp->sctp_lport)) {
6479 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6480 			*error = EINVAL;
6481 			return;
6482 		} else {
6483 			/* user specified 0 port, set it to existing port */
6484 			lsin->sin_port = inp->sctp_lport;
6485 		}
6486 
6487 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6488 		if (lep != NULL) {
6489 			/*
6490 			 * We must decrement the refcount since we have the
6491 			 * ep already and are binding. No remove going on
6492 			 * here.
6493 			 */
6494 			SCTP_INP_DECR_REF(lep);
6495 		}
6496 		if (lep == inp) {
6497 			/* already bound to it.. ok */
6498 			return;
6499 		} else if (lep == NULL) {
6500 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6501 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6502 			    SCTP_ADD_IP_ADDRESS,
6503 			    vrf_id, NULL);
6504 		} else {
6505 			*error = EADDRINUSE;
6506 		}
6507 		if (*error)
6508 			return;
6509 	} else {
6510 		/*
6511 		 * FIX: decide whether we allow assoc based bindx
6512 		 */
6513 	}
6514 }
6515 
6516 /*
6517  * sctp_bindx(DELETE) for one address.
6518  * assumes all arguments are valid/checked by caller.
6519  */
6520 void
6521 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6522     struct sockaddr *sa, sctp_assoc_t assoc_id,
6523     uint32_t vrf_id, int *error)
6524 {
6525 	struct sockaddr *addr_touse;
6526 #if defined(INET) && defined(INET6)
6527 	struct sockaddr_in sin;
6528 #endif
6529 
6530 	/* see if we're bound all already! */
6531 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6532 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6533 		*error = EINVAL;
6534 		return;
6535 	}
6536 	addr_touse = sa;
6537 #ifdef INET6
6538 	if (sa->sa_family == AF_INET6) {
6539 #ifdef INET
6540 		struct sockaddr_in6 *sin6;
6541 #endif
6542 
6543 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6544 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6545 			*error = EINVAL;
6546 			return;
6547 		}
6548 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6549 			/* can only bind v6 on PF_INET6 sockets */
6550 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6551 			*error = EINVAL;
6552 			return;
6553 		}
6554 #ifdef INET
6555 		sin6 = (struct sockaddr_in6 *)addr_touse;
6556 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6557 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6558 			    SCTP_IPV6_V6ONLY(inp)) {
6559 				/* can't bind mapped-v4 on PF_INET sockets */
6560 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6561 				*error = EINVAL;
6562 				return;
6563 			}
6564 			in6_sin6_2_sin(&sin, sin6);
6565 			addr_touse = (struct sockaddr *)&sin;
6566 		}
6567 #endif
6568 	}
6569 #endif
6570 #ifdef INET
6571 	if (sa->sa_family == AF_INET) {
6572 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6573 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6574 			*error = EINVAL;
6575 			return;
6576 		}
6577 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6578 		    SCTP_IPV6_V6ONLY(inp)) {
6579 			/* can't bind v4 on PF_INET sockets */
6580 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6581 			*error = EINVAL;
6582 			return;
6583 		}
6584 	}
6585 #endif
6586 	/*
6587 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6588 	 * below is ever changed we may need to lock before calling
6589 	 * association level binding.
6590 	 */
6591 	if (assoc_id == 0) {
6592 		/* delete the address */
6593 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6594 		    SCTP_DEL_IP_ADDRESS,
6595 		    vrf_id, NULL);
6596 	} else {
6597 		/*
6598 		 * FIX: decide whether we allow assoc based bindx
6599 		 */
6600 	}
6601 }
6602 
6603 /*
6604  * returns the valid local address count for an assoc, taking into account
6605  * all scoping rules
6606  */
6607 int
6608 sctp_local_addr_count(struct sctp_tcb *stcb)
6609 {
6610 	int loopback_scope;
6611 #if defined(INET)
6612 	int ipv4_local_scope, ipv4_addr_legal;
6613 #endif
6614 #if defined (INET6)
6615 	int local_scope, site_scope, ipv6_addr_legal;
6616 #endif
6617 	struct sctp_vrf *vrf;
6618 	struct sctp_ifn *sctp_ifn;
6619 	struct sctp_ifa *sctp_ifa;
6620 	int count = 0;
6621 
6622 	/* Turn on all the appropriate scopes */
6623 	loopback_scope = stcb->asoc.scope.loopback_scope;
6624 #if defined(INET)
6625 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6626 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6627 #endif
6628 #if defined(INET6)
6629 	local_scope = stcb->asoc.scope.local_scope;
6630 	site_scope = stcb->asoc.scope.site_scope;
6631 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6632 #endif
6633 	SCTP_IPI_ADDR_RLOCK();
6634 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6635 	if (vrf == NULL) {
6636 		/* no vrf, no addresses */
6637 		SCTP_IPI_ADDR_RUNLOCK();
6638 		return (0);
6639 	}
6640 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6641 		/*
6642 		 * bound all case: go through all ifns on the vrf
6643 		 */
6644 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6645 			if ((loopback_scope == 0) &&
6646 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6647 				continue;
6648 			}
6649 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6650 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6651 					continue;
6652 				switch (sctp_ifa->address.sa.sa_family) {
6653 #ifdef INET
6654 				case AF_INET:
6655 					if (ipv4_addr_legal) {
6656 						struct sockaddr_in *sin;
6657 
6658 						sin = &sctp_ifa->address.sin;
6659 						if (sin->sin_addr.s_addr == 0) {
6660 							/*
6661 							 * skip unspecified
6662 							 * addrs
6663 							 */
6664 							continue;
6665 						}
6666 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6667 						    &sin->sin_addr) != 0) {
6668 							continue;
6669 						}
6670 						if ((ipv4_local_scope == 0) &&
6671 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6672 							continue;
6673 						}
6674 						/* count this one */
6675 						count++;
6676 					} else {
6677 						continue;
6678 					}
6679 					break;
6680 #endif
6681 #ifdef INET6
6682 				case AF_INET6:
6683 					if (ipv6_addr_legal) {
6684 						struct sockaddr_in6 *sin6;
6685 
6686 						sin6 = &sctp_ifa->address.sin6;
6687 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6688 							continue;
6689 						}
6690 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6691 						    &sin6->sin6_addr) != 0) {
6692 							continue;
6693 						}
6694 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6695 							if (local_scope == 0)
6696 								continue;
6697 							if (sin6->sin6_scope_id == 0) {
6698 								if (sa6_recoverscope(sin6) != 0)
6699 									/*
6700 									 *
6701 									 * bad
6702 									 * link
6703 									 *
6704 									 * local
6705 									 *
6706 									 * address
6707 									 */
6708 									continue;
6709 							}
6710 						}
6711 						if ((site_scope == 0) &&
6712 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6713 							continue;
6714 						}
6715 						/* count this one */
6716 						count++;
6717 					}
6718 					break;
6719 #endif
6720 				default:
6721 					/* TSNH */
6722 					break;
6723 				}
6724 			}
6725 		}
6726 	} else {
6727 		/*
6728 		 * subset bound case
6729 		 */
6730 		struct sctp_laddr *laddr;
6731 
6732 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6733 		    sctp_nxt_addr) {
6734 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6735 				continue;
6736 			}
6737 			/* count this one */
6738 			count++;
6739 		}
6740 	}
6741 	SCTP_IPI_ADDR_RUNLOCK();
6742 	return (count);
6743 }
6744 
6745 #if defined(SCTP_LOCAL_TRACE_BUF)
6746 
6747 void
6748 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6749 {
6750 	uint32_t saveindex, newindex;
6751 
6752 	do {
6753 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6754 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6755 			newindex = 1;
6756 		} else {
6757 			newindex = saveindex + 1;
6758 		}
6759 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6760 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6761 		saveindex = 0;
6762 	}
6763 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6764 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6765 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6766 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6767 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6768 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6769 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6770 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6771 }
6772 
6773 #endif
6774 static void
6775 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6776     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6777 {
6778 	struct ip *iph;
6779 #ifdef INET6
6780 	struct ip6_hdr *ip6;
6781 #endif
6782 	struct mbuf *sp, *last;
6783 	struct udphdr *uhdr;
6784 	uint16_t port;
6785 
6786 	if ((m->m_flags & M_PKTHDR) == 0) {
6787 		/* Can't handle one that is not a pkt hdr */
6788 		goto out;
6789 	}
6790 	/* Pull the src port */
6791 	iph = mtod(m, struct ip *);
6792 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6793 	port = uhdr->uh_sport;
6794 	/*
6795 	 * Split out the mbuf chain. Leave the IP header in m, place the
6796 	 * rest in the sp.
6797 	 */
6798 	sp = m_split(m, off, M_NOWAIT);
6799 	if (sp == NULL) {
6800 		/* Gak, drop packet, we can't do a split */
6801 		goto out;
6802 	}
6803 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6804 		/* Gak, packet can't have an SCTP header in it - too small */
6805 		m_freem(sp);
6806 		goto out;
6807 	}
6808 	/* Now pull up the UDP header and SCTP header together */
6809 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6810 	if (sp == NULL) {
6811 		/* Gak pullup failed */
6812 		goto out;
6813 	}
6814 	/* Trim out the UDP header */
6815 	m_adj(sp, sizeof(struct udphdr));
6816 
6817 	/* Now reconstruct the mbuf chain */
6818 	for (last = m; last->m_next; last = last->m_next);
6819 	last->m_next = sp;
6820 	m->m_pkthdr.len += sp->m_pkthdr.len;
6821 	/*
6822 	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
6823 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
6824 	 * CSUM_SCTP_VALID this would imply that the HW also verified the
6825 	 * SCTP checksum. Therefore, clear the bit.
6826 	 */
6827 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6828 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6829 	    m->m_pkthdr.len,
6830 	    if_name(m->m_pkthdr.rcvif),
6831 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6832 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
6833 	iph = mtod(m, struct ip *);
6834 	switch (iph->ip_v) {
6835 #ifdef INET
6836 	case IPVERSION:
6837 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6838 		sctp_input_with_port(m, off, port);
6839 		break;
6840 #endif
6841 #ifdef INET6
6842 	case IPV6_VERSION >> 4:
6843 		ip6 = mtod(m, struct ip6_hdr *);
6844 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6845 		sctp6_input_with_port(&m, &off, port);
6846 		break;
6847 #endif
6848 	default:
6849 		goto out;
6850 		break;
6851 	}
6852 	return;
6853 out:
6854 	m_freem(m);
6855 }
6856 
6857 #ifdef INET
6858 static void
6859 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
6860 {
6861 	struct ip *outer_ip, *inner_ip;
6862 	struct sctphdr *sh;
6863 	struct icmp *icmp;
6864 	struct udphdr *udp;
6865 	struct sctp_inpcb *inp;
6866 	struct sctp_tcb *stcb;
6867 	struct sctp_nets *net;
6868 	struct sctp_init_chunk *ch;
6869 	struct sockaddr_in src, dst;
6870 	uint8_t type, code;
6871 
6872 	inner_ip = (struct ip *)vip;
6873 	icmp = (struct icmp *)((caddr_t)inner_ip -
6874 	    (sizeof(struct icmp) - sizeof(struct ip)));
6875 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
6876 	if (ntohs(outer_ip->ip_len) <
6877 	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
6878 		return;
6879 	}
6880 	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
6881 	sh = (struct sctphdr *)(udp + 1);
6882 	memset(&src, 0, sizeof(struct sockaddr_in));
6883 	src.sin_family = AF_INET;
6884 	src.sin_len = sizeof(struct sockaddr_in);
6885 	src.sin_port = sh->src_port;
6886 	src.sin_addr = inner_ip->ip_src;
6887 	memset(&dst, 0, sizeof(struct sockaddr_in));
6888 	dst.sin_family = AF_INET;
6889 	dst.sin_len = sizeof(struct sockaddr_in);
6890 	dst.sin_port = sh->dest_port;
6891 	dst.sin_addr = inner_ip->ip_dst;
6892 	/*
6893 	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
6894 	 * holds our local endpoint address. Thus we reverse the dst and the
6895 	 * src in the lookup.
6896 	 */
6897 	inp = NULL;
6898 	net = NULL;
6899 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
6900 	    (struct sockaddr *)&src,
6901 	    &inp, &net, 1,
6902 	    SCTP_DEFAULT_VRFID);
6903 	if ((stcb != NULL) &&
6904 	    (net != NULL) &&
6905 	    (inp != NULL)) {
6906 		/* Check the UDP port numbers */
6907 		if ((udp->uh_dport != net->port) ||
6908 		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
6909 			SCTP_TCB_UNLOCK(stcb);
6910 			return;
6911 		}
6912 		/* Check the verification tag */
6913 		if (ntohl(sh->v_tag) != 0) {
6914 			/*
6915 			 * This must be the verification tag used for
6916 			 * sending out packets. We don't consider packets
6917 			 * reflecting the verification tag.
6918 			 */
6919 			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
6920 				SCTP_TCB_UNLOCK(stcb);
6921 				return;
6922 			}
6923 		} else {
6924 			if (ntohs(outer_ip->ip_len) >=
6925 			    sizeof(struct ip) +
6926 			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
6927 				/*
6928 				 * In this case we can check if we got an
6929 				 * INIT chunk and if the initiate tag
6930 				 * matches.
6931 				 */
6932 				ch = (struct sctp_init_chunk *)(sh + 1);
6933 				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
6934 				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
6935 					SCTP_TCB_UNLOCK(stcb);
6936 					return;
6937 				}
6938 			} else {
6939 				SCTP_TCB_UNLOCK(stcb);
6940 				return;
6941 			}
6942 		}
6943 		type = icmp->icmp_type;
6944 		code = icmp->icmp_code;
6945 		if ((type == ICMP_UNREACH) &&
6946 		    (code == ICMP_UNREACH_PORT)) {
6947 			code = ICMP_UNREACH_PROTOCOL;
6948 		}
6949 		sctp_notify(inp, stcb, net, type, code,
6950 		    ntohs(inner_ip->ip_len),
6951 		    (uint32_t)ntohs(icmp->icmp_nextmtu));
6952 	} else {
6953 		if ((stcb == NULL) && (inp != NULL)) {
6954 			/* reduce ref-count */
6955 			SCTP_INP_WLOCK(inp);
6956 			SCTP_INP_DECR_REF(inp);
6957 			SCTP_INP_WUNLOCK(inp);
6958 		}
6959 		if (stcb) {
6960 			SCTP_TCB_UNLOCK(stcb);
6961 		}
6962 	}
6963 	return;
6964 }
6965 #endif
6966 
6967 #ifdef INET6
6968 static void
6969 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
6970 {
6971 	struct ip6ctlparam *ip6cp;
6972 	struct sctp_inpcb *inp;
6973 	struct sctp_tcb *stcb;
6974 	struct sctp_nets *net;
6975 	struct sctphdr sh;
6976 	struct udphdr udp;
6977 	struct sockaddr_in6 src, dst;
6978 	uint8_t type, code;
6979 
6980 	ip6cp = (struct ip6ctlparam *)d;
6981 	/*
6982 	 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
6983 	 */
6984 	if (ip6cp->ip6c_m == NULL) {
6985 		return;
6986 	}
6987 	/*
6988 	 * Check if we can safely examine the ports and the verification tag
6989 	 * of the SCTP common header.
6990 	 */
6991 	if (ip6cp->ip6c_m->m_pkthdr.len <
6992 	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
6993 		return;
6994 	}
6995 	/* Copy out the UDP header. */
6996 	memset(&udp, 0, sizeof(struct udphdr));
6997 	m_copydata(ip6cp->ip6c_m,
6998 	    ip6cp->ip6c_off,
6999 	    sizeof(struct udphdr),
7000 	    (caddr_t)&udp);
7001 	/* Copy out the port numbers and the verification tag. */
7002 	memset(&sh, 0, sizeof(struct sctphdr));
7003 	m_copydata(ip6cp->ip6c_m,
7004 	    ip6cp->ip6c_off + sizeof(struct udphdr),
7005 	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
7006 	    (caddr_t)&sh);
7007 	memset(&src, 0, sizeof(struct sockaddr_in6));
7008 	src.sin6_family = AF_INET6;
7009 	src.sin6_len = sizeof(struct sockaddr_in6);
7010 	src.sin6_port = sh.src_port;
7011 	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
7012 	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7013 		return;
7014 	}
7015 	memset(&dst, 0, sizeof(struct sockaddr_in6));
7016 	dst.sin6_family = AF_INET6;
7017 	dst.sin6_len = sizeof(struct sockaddr_in6);
7018 	dst.sin6_port = sh.dest_port;
7019 	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7020 	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7021 		return;
7022 	}
7023 	inp = NULL;
7024 	net = NULL;
7025 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7026 	    (struct sockaddr *)&src,
7027 	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7028 	if ((stcb != NULL) &&
7029 	    (net != NULL) &&
7030 	    (inp != NULL)) {
7031 		/* Check the UDP port numbers */
7032 		if ((udp.uh_dport != net->port) ||
7033 		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7034 			SCTP_TCB_UNLOCK(stcb);
7035 			return;
7036 		}
7037 		/* Check the verification tag */
7038 		if (ntohl(sh.v_tag) != 0) {
7039 			/*
7040 			 * This must be the verification tag used for
7041 			 * sending out packets. We don't consider packets
7042 			 * reflecting the verification tag.
7043 			 */
7044 			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7045 				SCTP_TCB_UNLOCK(stcb);
7046 				return;
7047 			}
7048 		} else {
7049 			if (ip6cp->ip6c_m->m_pkthdr.len >=
7050 			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7051 			    sizeof(struct sctphdr) +
7052 			    sizeof(struct sctp_chunkhdr) +
7053 			    offsetof(struct sctp_init, a_rwnd)) {
7054 				/*
7055 				 * In this case we can check if we got an
7056 				 * INIT chunk and if the initiate tag
7057 				 * matches.
7058 				 */
7059 				uint32_t initiate_tag;
7060 				uint8_t chunk_type;
7061 
7062 				m_copydata(ip6cp->ip6c_m,
7063 				    ip6cp->ip6c_off +
7064 				    sizeof(struct udphdr) +
7065 				    sizeof(struct sctphdr),
7066 				    sizeof(uint8_t),
7067 				    (caddr_t)&chunk_type);
7068 				m_copydata(ip6cp->ip6c_m,
7069 				    ip6cp->ip6c_off +
7070 				    sizeof(struct udphdr) +
7071 				    sizeof(struct sctphdr) +
7072 				    sizeof(struct sctp_chunkhdr),
7073 				    sizeof(uint32_t),
7074 				    (caddr_t)&initiate_tag);
7075 				if ((chunk_type != SCTP_INITIATION) ||
7076 				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7077 					SCTP_TCB_UNLOCK(stcb);
7078 					return;
7079 				}
7080 			} else {
7081 				SCTP_TCB_UNLOCK(stcb);
7082 				return;
7083 			}
7084 		}
7085 		type = ip6cp->ip6c_icmp6->icmp6_type;
7086 		code = ip6cp->ip6c_icmp6->icmp6_code;
7087 		if ((type == ICMP6_DST_UNREACH) &&
7088 		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7089 			type = ICMP6_PARAM_PROB;
7090 			code = ICMP6_PARAMPROB_NEXTHEADER;
7091 		}
7092 		sctp6_notify(inp, stcb, net, type, code,
7093 		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7094 	} else {
7095 		if ((stcb == NULL) && (inp != NULL)) {
7096 			/* reduce inp's ref-count */
7097 			SCTP_INP_WLOCK(inp);
7098 			SCTP_INP_DECR_REF(inp);
7099 			SCTP_INP_WUNLOCK(inp);
7100 		}
7101 		if (stcb) {
7102 			SCTP_TCB_UNLOCK(stcb);
7103 		}
7104 	}
7105 }
7106 #endif
7107 
7108 void
7109 sctp_over_udp_stop(void)
7110 {
7111 	/*
7112 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7113 	 * for writting!
7114 	 */
7115 #ifdef INET
7116 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7117 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7118 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7119 	}
7120 #endif
7121 #ifdef INET6
7122 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7123 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7124 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7125 	}
7126 #endif
7127 }
7128 
7129 int
7130 sctp_over_udp_start(void)
7131 {
7132 	uint16_t port;
7133 	int ret;
7134 #ifdef INET
7135 	struct sockaddr_in sin;
7136 #endif
7137 #ifdef INET6
7138 	struct sockaddr_in6 sin6;
7139 #endif
7140 	/*
7141 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7142 	 * for writting!
7143 	 */
7144 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7145 	if (ntohs(port) == 0) {
7146 		/* Must have a port set */
7147 		return (EINVAL);
7148 	}
7149 #ifdef INET
7150 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7151 		/* Already running -- must stop first */
7152 		return (EALREADY);
7153 	}
7154 #endif
7155 #ifdef INET6
7156 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7157 		/* Already running -- must stop first */
7158 		return (EALREADY);
7159 	}
7160 #endif
7161 #ifdef INET
7162 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7163 	    SOCK_DGRAM, IPPROTO_UDP,
7164 	    curthread->td_ucred, curthread))) {
7165 		sctp_over_udp_stop();
7166 		return (ret);
7167 	}
7168 	/* Call the special UDP hook. */
7169 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7170 	    sctp_recv_udp_tunneled_packet,
7171 	    sctp_recv_icmp_tunneled_packet,
7172 	    NULL))) {
7173 		sctp_over_udp_stop();
7174 		return (ret);
7175 	}
7176 	/* Ok, we have a socket, bind it to the port. */
7177 	memset(&sin, 0, sizeof(struct sockaddr_in));
7178 	sin.sin_len = sizeof(struct sockaddr_in);
7179 	sin.sin_family = AF_INET;
7180 	sin.sin_port = htons(port);
7181 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7182 	    (struct sockaddr *)&sin, curthread))) {
7183 		sctp_over_udp_stop();
7184 		return (ret);
7185 	}
7186 #endif
7187 #ifdef INET6
7188 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7189 	    SOCK_DGRAM, IPPROTO_UDP,
7190 	    curthread->td_ucred, curthread))) {
7191 		sctp_over_udp_stop();
7192 		return (ret);
7193 	}
7194 	/* Call the special UDP hook. */
7195 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7196 	    sctp_recv_udp_tunneled_packet,
7197 	    sctp_recv_icmp6_tunneled_packet,
7198 	    NULL))) {
7199 		sctp_over_udp_stop();
7200 		return (ret);
7201 	}
7202 	/* Ok, we have a socket, bind it to the port. */
7203 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7204 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7205 	sin6.sin6_family = AF_INET6;
7206 	sin6.sin6_port = htons(port);
7207 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7208 	    (struct sockaddr *)&sin6, curthread))) {
7209 		sctp_over_udp_stop();
7210 		return (ret);
7211 	}
7212 #endif
7213 	return (0);
7214 }
7215 
7216 /*
7217  * sctp_min_mtu ()returns the minimum of all non-zero arguments.
7218  * If all arguments are zero, zero is returned.
7219  */
7220 uint32_t
7221 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7222 {
7223 	if (mtu1 > 0) {
7224 		if (mtu2 > 0) {
7225 			if (mtu3 > 0) {
7226 				return (min(mtu1, min(mtu2, mtu3)));
7227 			} else {
7228 				return (min(mtu1, mtu2));
7229 			}
7230 		} else {
7231 			if (mtu3 > 0) {
7232 				return (min(mtu1, mtu3));
7233 			} else {
7234 				return (mtu1);
7235 			}
7236 		}
7237 	} else {
7238 		if (mtu2 > 0) {
7239 			if (mtu3 > 0) {
7240 				return (min(mtu2, mtu3));
7241 			} else {
7242 				return (mtu2);
7243 			}
7244 		} else {
7245 			return (mtu3);
7246 		}
7247 	}
7248 }
7249 
7250 void
7251 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7252 {
7253 	struct in_conninfo inc;
7254 
7255 	memset(&inc, 0, sizeof(struct in_conninfo));
7256 	inc.inc_fibnum = fibnum;
7257 	switch (addr->sa.sa_family) {
7258 #ifdef INET
7259 	case AF_INET:
7260 		inc.inc_faddr = addr->sin.sin_addr;
7261 		break;
7262 #endif
7263 #ifdef INET6
7264 	case AF_INET6:
7265 		inc.inc_flags |= INC_ISIPV6;
7266 		inc.inc6_faddr = addr->sin6.sin6_addr;
7267 		break;
7268 #endif
7269 	default:
7270 		return;
7271 	}
7272 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7273 }
7274 
7275 uint32_t
7276 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7277 {
7278 	struct in_conninfo inc;
7279 
7280 	memset(&inc, 0, sizeof(struct in_conninfo));
7281 	inc.inc_fibnum = fibnum;
7282 	switch (addr->sa.sa_family) {
7283 #ifdef INET
7284 	case AF_INET:
7285 		inc.inc_faddr = addr->sin.sin_addr;
7286 		break;
7287 #endif
7288 #ifdef INET6
7289 	case AF_INET6:
7290 		inc.inc_flags |= INC_ISIPV6;
7291 		inc.inc6_faddr = addr->sin6.sin6_addr;
7292 		break;
7293 #endif
7294 	default:
7295 		return (0);
7296 	}
7297 	return ((uint32_t)tcp_hc_getmtu(&inc));
7298 }
7299