xref: /freebsd/sys/netinet/sctputil.c (revision 40427cca7a9ae77b095936fb1954417c290cfb17)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #if defined(INET6) || defined(INET)
53 #include <netinet/tcp_var.h>
54 #endif
55 #include <netinet/udp.h>
56 #include <netinet/udp_var.h>
57 #include <sys/proc.h>
58 #ifdef INET6
59 #include <netinet/icmp6.h>
60 #endif
61 
62 
63 #ifndef KTR_SCTP
64 #define KTR_SCTP KTR_SUBSYS
65 #endif
66 
67 extern const struct sctp_cc_functions sctp_cc_functions[];
68 extern const struct sctp_ss_functions sctp_ss_functions[];
69 
70 void
71 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
72 {
73 	struct sctp_cwnd_log sctp_clog;
74 
75 	sctp_clog.x.sb.stcb = stcb;
76 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
77 	if (stcb)
78 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
79 	else
80 		sctp_clog.x.sb.stcb_sbcc = 0;
81 	sctp_clog.x.sb.incr = incr;
82 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
83 	    SCTP_LOG_EVENT_SB,
84 	    from,
85 	    sctp_clog.x.misc.log1,
86 	    sctp_clog.x.misc.log2,
87 	    sctp_clog.x.misc.log3,
88 	    sctp_clog.x.misc.log4);
89 }
90 
91 void
92 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
93 {
94 	struct sctp_cwnd_log sctp_clog;
95 
96 	sctp_clog.x.close.inp = (void *)inp;
97 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
98 	if (stcb) {
99 		sctp_clog.x.close.stcb = (void *)stcb;
100 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
101 	} else {
102 		sctp_clog.x.close.stcb = 0;
103 		sctp_clog.x.close.state = 0;
104 	}
105 	sctp_clog.x.close.loc = loc;
106 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
107 	    SCTP_LOG_EVENT_CLOSE,
108 	    0,
109 	    sctp_clog.x.misc.log1,
110 	    sctp_clog.x.misc.log2,
111 	    sctp_clog.x.misc.log3,
112 	    sctp_clog.x.misc.log4);
113 }
114 
115 void
116 rto_logging(struct sctp_nets *net, int from)
117 {
118 	struct sctp_cwnd_log sctp_clog;
119 
120 	memset(&sctp_clog, 0, sizeof(sctp_clog));
121 	sctp_clog.x.rto.net = (void *)net;
122 	sctp_clog.x.rto.rtt = net->rtt / 1000;
123 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
124 	    SCTP_LOG_EVENT_RTT,
125 	    from,
126 	    sctp_clog.x.misc.log1,
127 	    sctp_clog.x.misc.log2,
128 	    sctp_clog.x.misc.log3,
129 	    sctp_clog.x.misc.log4);
130 }
131 
132 void
133 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
134 {
135 	struct sctp_cwnd_log sctp_clog;
136 
137 	sctp_clog.x.strlog.stcb = stcb;
138 	sctp_clog.x.strlog.n_tsn = tsn;
139 	sctp_clog.x.strlog.n_sseq = sseq;
140 	sctp_clog.x.strlog.e_tsn = 0;
141 	sctp_clog.x.strlog.e_sseq = 0;
142 	sctp_clog.x.strlog.strm = stream;
143 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
144 	    SCTP_LOG_EVENT_STRM,
145 	    from,
146 	    sctp_clog.x.misc.log1,
147 	    sctp_clog.x.misc.log2,
148 	    sctp_clog.x.misc.log3,
149 	    sctp_clog.x.misc.log4);
150 }
151 
152 void
153 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
154 {
155 	struct sctp_cwnd_log sctp_clog;
156 
157 	sctp_clog.x.nagle.stcb = (void *)stcb;
158 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
159 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
160 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
161 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
162 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
163 	    SCTP_LOG_EVENT_NAGLE,
164 	    action,
165 	    sctp_clog.x.misc.log1,
166 	    sctp_clog.x.misc.log2,
167 	    sctp_clog.x.misc.log3,
168 	    sctp_clog.x.misc.log4);
169 }
170 
171 void
172 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
173 {
174 	struct sctp_cwnd_log sctp_clog;
175 
176 	sctp_clog.x.sack.cumack = cumack;
177 	sctp_clog.x.sack.oldcumack = old_cumack;
178 	sctp_clog.x.sack.tsn = tsn;
179 	sctp_clog.x.sack.numGaps = gaps;
180 	sctp_clog.x.sack.numDups = dups;
181 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
182 	    SCTP_LOG_EVENT_SACK,
183 	    from,
184 	    sctp_clog.x.misc.log1,
185 	    sctp_clog.x.misc.log2,
186 	    sctp_clog.x.misc.log3,
187 	    sctp_clog.x.misc.log4);
188 }
189 
190 void
191 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
192 {
193 	struct sctp_cwnd_log sctp_clog;
194 
195 	memset(&sctp_clog, 0, sizeof(sctp_clog));
196 	sctp_clog.x.map.base = map;
197 	sctp_clog.x.map.cum = cum;
198 	sctp_clog.x.map.high = high;
199 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
200 	    SCTP_LOG_EVENT_MAP,
201 	    from,
202 	    sctp_clog.x.misc.log1,
203 	    sctp_clog.x.misc.log2,
204 	    sctp_clog.x.misc.log3,
205 	    sctp_clog.x.misc.log4);
206 }
207 
208 void
209 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
210 {
211 	struct sctp_cwnd_log sctp_clog;
212 
213 	memset(&sctp_clog, 0, sizeof(sctp_clog));
214 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
215 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
216 	sctp_clog.x.fr.tsn = tsn;
217 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
218 	    SCTP_LOG_EVENT_FR,
219 	    from,
220 	    sctp_clog.x.misc.log1,
221 	    sctp_clog.x.misc.log2,
222 	    sctp_clog.x.misc.log3,
223 	    sctp_clog.x.misc.log4);
224 }
225 
226 #ifdef SCTP_MBUF_LOGGING
227 void
228 sctp_log_mb(struct mbuf *m, int from)
229 {
230 	struct sctp_cwnd_log sctp_clog;
231 
232 	sctp_clog.x.mb.mp = m;
233 	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
234 	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
235 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
236 	if (SCTP_BUF_IS_EXTENDED(m)) {
237 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
238 		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
239 	} else {
240 		sctp_clog.x.mb.ext = 0;
241 		sctp_clog.x.mb.refcnt = 0;
242 	}
243 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
244 	    SCTP_LOG_EVENT_MBUF,
245 	    from,
246 	    sctp_clog.x.misc.log1,
247 	    sctp_clog.x.misc.log2,
248 	    sctp_clog.x.misc.log3,
249 	    sctp_clog.x.misc.log4);
250 }
251 
252 void
253 sctp_log_mbc(struct mbuf *m, int from)
254 {
255 	struct mbuf *mat;
256 
257 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
258 		sctp_log_mb(mat, from);
259 	}
260 }
261 #endif
262 
263 void
264 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
265 {
266 	struct sctp_cwnd_log sctp_clog;
267 
268 	if (control == NULL) {
269 		SCTP_PRINTF("Gak log of NULL?\n");
270 		return;
271 	}
272 	sctp_clog.x.strlog.stcb = control->stcb;
273 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
274 	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
275 	sctp_clog.x.strlog.strm = control->sinfo_stream;
276 	if (poschk != NULL) {
277 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
278 		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
279 	} else {
280 		sctp_clog.x.strlog.e_tsn = 0;
281 		sctp_clog.x.strlog.e_sseq = 0;
282 	}
283 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
284 	    SCTP_LOG_EVENT_STRM,
285 	    from,
286 	    sctp_clog.x.misc.log1,
287 	    sctp_clog.x.misc.log2,
288 	    sctp_clog.x.misc.log3,
289 	    sctp_clog.x.misc.log4);
290 }
291 
292 void
293 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
294 {
295 	struct sctp_cwnd_log sctp_clog;
296 
297 	sctp_clog.x.cwnd.net = net;
298 	if (stcb->asoc.send_queue_cnt > 255)
299 		sctp_clog.x.cwnd.cnt_in_send = 255;
300 	else
301 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
302 	if (stcb->asoc.stream_queue_cnt > 255)
303 		sctp_clog.x.cwnd.cnt_in_str = 255;
304 	else
305 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
306 
307 	if (net) {
308 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
309 		sctp_clog.x.cwnd.inflight = net->flight_size;
310 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
311 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
312 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
313 	}
314 	if (SCTP_CWNDLOG_PRESEND == from) {
315 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
316 	}
317 	sctp_clog.x.cwnd.cwnd_augment = augment;
318 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
319 	    SCTP_LOG_EVENT_CWND,
320 	    from,
321 	    sctp_clog.x.misc.log1,
322 	    sctp_clog.x.misc.log2,
323 	    sctp_clog.x.misc.log3,
324 	    sctp_clog.x.misc.log4);
325 }
326 
/*
 * Snapshot the ownership state of every lock relevant to this
 * inp/stcb pair (association, inpcb, create, global info, socket and
 * socket-buffer locks) and emit it as one KTR trace record. Either
 * argument may be NULL; unknown states are logged as SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * sock_lock and sockrcvbuf_lock both sample so_rcv.sb_mtx;
		 * presumably the socket lock aliases the receive-buffer
		 * mutex here — NOTE(review): confirm against socket locking.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
370 
371 void
372 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
373 {
374 	struct sctp_cwnd_log sctp_clog;
375 
376 	memset(&sctp_clog, 0, sizeof(sctp_clog));
377 	sctp_clog.x.cwnd.net = net;
378 	sctp_clog.x.cwnd.cwnd_new_value = error;
379 	sctp_clog.x.cwnd.inflight = net->flight_size;
380 	sctp_clog.x.cwnd.cwnd_augment = burst;
381 	if (stcb->asoc.send_queue_cnt > 255)
382 		sctp_clog.x.cwnd.cnt_in_send = 255;
383 	else
384 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
385 	if (stcb->asoc.stream_queue_cnt > 255)
386 		sctp_clog.x.cwnd.cnt_in_str = 255;
387 	else
388 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
389 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
390 	    SCTP_LOG_EVENT_MAXBURST,
391 	    from,
392 	    sctp_clog.x.misc.log1,
393 	    sctp_clog.x.misc.log2,
394 	    sctp_clog.x.misc.log3,
395 	    sctp_clog.x.misc.log4);
396 }
397 
398 void
399 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
400 {
401 	struct sctp_cwnd_log sctp_clog;
402 
403 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
404 	sctp_clog.x.rwnd.send_size = snd_size;
405 	sctp_clog.x.rwnd.overhead = overhead;
406 	sctp_clog.x.rwnd.new_rwnd = 0;
407 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
408 	    SCTP_LOG_EVENT_RWND,
409 	    from,
410 	    sctp_clog.x.misc.log1,
411 	    sctp_clog.x.misc.log2,
412 	    sctp_clog.x.misc.log3,
413 	    sctp_clog.x.misc.log4);
414 }
415 
416 void
417 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
418 {
419 	struct sctp_cwnd_log sctp_clog;
420 
421 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
422 	sctp_clog.x.rwnd.send_size = flight_size;
423 	sctp_clog.x.rwnd.overhead = overhead;
424 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
425 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
426 	    SCTP_LOG_EVENT_RWND,
427 	    from,
428 	    sctp_clog.x.misc.log1,
429 	    sctp_clog.x.misc.log2,
430 	    sctp_clog.x.misc.log3,
431 	    sctp_clog.x.misc.log4);
432 }
433 
434 #ifdef SCTP_MBCNT_LOGGING
435 static void
436 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
437 {
438 	struct sctp_cwnd_log sctp_clog;
439 
440 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
441 	sctp_clog.x.mbcnt.size_change = book;
442 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
443 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
444 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
445 	    SCTP_LOG_EVENT_MBCNT,
446 	    from,
447 	    sctp_clog.x.misc.log1,
448 	    sctp_clog.x.misc.log2,
449 	    sctp_clog.x.misc.log3,
450 	    sctp_clog.x.misc.log4);
451 }
452 #endif
453 
/*
 * Emit four caller-supplied 32-bit values as a generic "misc" KTR trace
 * record; 'from' identifies the call site.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
462 
463 void
464 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
465 {
466 	struct sctp_cwnd_log sctp_clog;
467 
468 	sctp_clog.x.wake.stcb = (void *)stcb;
469 	sctp_clog.x.wake.wake_cnt = wake_cnt;
470 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
471 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
472 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
473 
474 	if (stcb->asoc.stream_queue_cnt < 0xff)
475 		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
476 	else
477 		sctp_clog.x.wake.stream_qcnt = 0xff;
478 
479 	if (stcb->asoc.chunks_on_out_queue < 0xff)
480 		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
481 	else
482 		sctp_clog.x.wake.chunks_on_oque = 0xff;
483 
484 	sctp_clog.x.wake.sctpflags = 0;
485 	/* set in the defered mode stuff */
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
487 		sctp_clog.x.wake.sctpflags |= 1;
488 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
489 		sctp_clog.x.wake.sctpflags |= 2;
490 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
491 		sctp_clog.x.wake.sctpflags |= 4;
492 	/* what about the sb */
493 	if (stcb->sctp_socket) {
494 		struct socket *so = stcb->sctp_socket;
495 
496 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
497 	} else {
498 		sctp_clog.x.wake.sbflags = 0xff;
499 	}
500 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
501 	    SCTP_LOG_EVENT_WAKE,
502 	    from,
503 	    sctp_clog.x.misc.log1,
504 	    sctp_clog.x.misc.log2,
505 	    sctp_clog.x.misc.log3,
506 	    sctp_clog.x.misc.log4);
507 }
508 
509 void
510 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
511 {
512 	struct sctp_cwnd_log sctp_clog;
513 
514 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
515 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
516 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
517 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
518 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
519 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
520 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
521 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
522 	    SCTP_LOG_EVENT_BLOCK,
523 	    from,
524 	    sctp_clog.x.misc.log1,
525 	    sctp_clog.x.misc.log2,
526 	    sctp_clog.x.misc.log3,
527 	    sctp_clog.x.misc.log4);
528 }
529 
/*
 * Stub for the SCTP_GET_STAT_LOG socket option: statistics are read via
 * the KTR trace facility instead, so this always reports success.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
536 
537 #ifdef SCTP_AUDITING_ENABLED
538 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
539 static int sctp_audit_indx = 0;
540 
541 static
542 void
543 sctp_print_audit_report(void)
544 {
545 	int i;
546 	int cnt;
547 
548 	cnt = 0;
549 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
550 		if ((sctp_audit_data[i][0] == 0xe0) &&
551 		    (sctp_audit_data[i][1] == 0x01)) {
552 			cnt = 0;
553 			SCTP_PRINTF("\n");
554 		} else if (sctp_audit_data[i][0] == 0xf0) {
555 			cnt = 0;
556 			SCTP_PRINTF("\n");
557 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
558 		    (sctp_audit_data[i][1] == 0x01)) {
559 			SCTP_PRINTF("\n");
560 			cnt = 0;
561 		}
562 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
563 		    (uint32_t)sctp_audit_data[i][1]);
564 		cnt++;
565 		if ((cnt % 14) == 0)
566 			SCTP_PRINTF("\n");
567 	}
568 	for (i = 0; i < sctp_audit_indx; i++) {
569 		if ((sctp_audit_data[i][0] == 0xe0) &&
570 		    (sctp_audit_data[i][1] == 0x01)) {
571 			cnt = 0;
572 			SCTP_PRINTF("\n");
573 		} else if (sctp_audit_data[i][0] == 0xf0) {
574 			cnt = 0;
575 			SCTP_PRINTF("\n");
576 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
577 		    (sctp_audit_data[i][1] == 0x01)) {
578 			SCTP_PRINTF("\n");
579 			cnt = 0;
580 		}
581 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
582 		    (uint32_t)sctp_audit_data[i][1]);
583 		cnt++;
584 		if ((cnt % 14) == 0)
585 			SCTP_PRINTF("\n");
586 	}
587 	SCTP_PRINTF("\n");
588 }
589 
/*
 * Consistency audit of an association's retransmission bookkeeping
 * (compiled only under SCTP_AUDITING_ENABLED). Records trace entries in
 * the sctp_audit_data ring, cross-checks the sent queue against the
 * cached counters (sent_queue_retran_cnt, total_flight,
 * total_flight_count) and the per-net flight sizes, CORRECTS any
 * mismatch in place, and prints the audit ring if anything was wrong.
 * 'from' tags the call site; inp/stcb may be NULL (logged as 0xAF
 * failure records and the audit returns early).
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA: audit entry marker, second byte = call site. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: no endpoint supplied. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: no association supplied. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the cached retransmit count before checking it. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Recompute retransmit count and flight totals directly from the
	 * sent queue; chunks below SCTP_DATAGRAM_RESEND are still in flight.
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: cached retransmit count was wrong; fix it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: cached flight byte total was wrong; fix it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: cached flight chunk count was wrong; fix it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Now cross-check the sum of the per-net flight sizes. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net flight sizes disagree with the total. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
719 
720 void
721 sctp_audit_log(uint8_t ev, uint8_t fd)
722 {
723 
724 	sctp_audit_data[sctp_audit_indx][0] = ev;
725 	sctp_audit_data[sctp_audit_indx][1] = fd;
726 	sctp_audit_indx++;
727 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
728 		sctp_audit_indx = 0;
729 	}
730 }
731 
732 #endif
733 
734 /*
735  * sctp_stop_timers_for_shutdown() should be called
736  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
737  * state to make sure that all timers are stopped.
738  */
739 void
740 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
741 {
742 	struct sctp_association *asoc;
743 	struct sctp_nets *net;
744 
745 	asoc = &stcb->asoc;
746 
747 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
748 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
749 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
750 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
751 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
752 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
753 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
754 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
755 	}
756 }
757 
758 /*
759  * a list of sizes based on typical mtu's, used only if next hop size not
760  * returned.
761  */
/*
 * Table of typical link MTUs, ascending; consulted only when the next
 * hop does not report a size.
 */
static uint32_t sctp_mtu_sizes[] = {
	68, 296, 508, 512, 544, 576, 1006, 1492, 1500,
	1536, 2002, 2048, 4352, 4464, 8166, 17914, 32000, 65535
};

/*
 * Return the largest table MTU strictly smaller than val, or val itself
 * when no table entry is smaller.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	/* Scan downward for the first entry below val. */
	i = sizeof(sctp_mtu_sizes) / sizeof(uint32_t);
	while (i-- > 0) {
		if (sctp_mtu_sizes[i] < val) {
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}

/*
 * Return the smallest table MTU strictly larger than val, or val itself
 * when no table entry is larger.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	uint32_t i, count;

	count = sizeof(sctp_mtu_sizes) / sizeof(uint32_t);
	for (i = 0; i < count; i++) {
		if (sctp_mtu_sizes[i] > val) {
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}
820 
/*
 * Refill the endpoint's random_store by keyed-hashing the endpoint's
 * random_numbers over an incrementing counter, then reset store_at so
 * sctp_select_initial_TSN() starts consuming from the beginning again.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}
839 
840 uint32_t
841 sctp_select_initial_TSN(struct sctp_pcb *inp)
842 {
843 	/*
844 	 * A true implementation should use random selection process to get
845 	 * the initial stream sequence number, using RFC1750 as a good
846 	 * guideline
847 	 */
848 	uint32_t x, *xp;
849 	uint8_t *p;
850 	int store_at, new_store;
851 
852 	if (inp->initial_sequence_debug != 0) {
853 		uint32_t ret;
854 
855 		ret = inp->initial_sequence_debug;
856 		inp->initial_sequence_debug++;
857 		return (ret);
858 	}
859 retry:
860 	store_at = inp->store_at;
861 	new_store = store_at + sizeof(uint32_t);
862 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
863 		new_store = 0;
864 	}
865 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
866 		goto retry;
867 	}
868 	if (new_store == 0) {
869 		/* Refill the random store */
870 		sctp_fill_random_store(inp);
871 	}
872 	p = &inp->random_store[store_at];
873 	xp = (uint32_t *)p;
874 	x = *xp;
875 	return (x);
876 }
877 
878 uint32_t
879 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
880 {
881 	uint32_t x;
882 	struct timeval now;
883 
884 	if (check) {
885 		(void)SCTP_GETTIME_TIMEVAL(&now);
886 	}
887 	for (;;) {
888 		x = sctp_select_initial_TSN(&inp->sctp_ep);
889 		if (x == 0) {
890 			/* we never use 0 */
891 			continue;
892 		}
893 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
894 			break;
895 		}
896 	}
897 	return (x);
898 }
899 
900 int32_t
901 sctp_map_assoc_state(int kernel_state)
902 {
903 	int32_t user_state;
904 
905 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
906 		user_state = SCTP_CLOSED;
907 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
908 		user_state = SCTP_SHUTDOWN_PENDING;
909 	} else {
910 		switch (kernel_state & SCTP_STATE_MASK) {
911 		case SCTP_STATE_EMPTY:
912 			user_state = SCTP_CLOSED;
913 			break;
914 		case SCTP_STATE_INUSE:
915 			user_state = SCTP_CLOSED;
916 			break;
917 		case SCTP_STATE_COOKIE_WAIT:
918 			user_state = SCTP_COOKIE_WAIT;
919 			break;
920 		case SCTP_STATE_COOKIE_ECHOED:
921 			user_state = SCTP_COOKIE_ECHOED;
922 			break;
923 		case SCTP_STATE_OPEN:
924 			user_state = SCTP_ESTABLISHED;
925 			break;
926 		case SCTP_STATE_SHUTDOWN_SENT:
927 			user_state = SCTP_SHUTDOWN_SENT;
928 			break;
929 		case SCTP_STATE_SHUTDOWN_RECEIVED:
930 			user_state = SCTP_SHUTDOWN_RECEIVED;
931 			break;
932 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
933 			user_state = SCTP_SHUTDOWN_ACK_SENT;
934 			break;
935 		default:
936 			user_state = SCTP_CLOSED;
937 			break;
938 		}
939 	}
940 	return (user_state);
941 }
942 
943 int
944 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
945     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
946 {
947 	struct sctp_association *asoc;
948 
949 	/*
950 	 * Anything set to zero is taken care of by the allocation routine's
951 	 * bzero
952 	 */
953 
954 	/*
955 	 * Up front select what scoping to apply on addresses I tell my peer
956 	 * Not sure what to do with these right now, we will need to come up
957 	 * with a way to set them. We may need to pass them through from the
958 	 * caller in the sctp_aloc_assoc() function.
959 	 */
960 	int i;
961 #if defined(SCTP_DETAILED_STR_STATS)
962 	int j;
963 #endif
964 
965 	asoc = &stcb->asoc;
966 	/* init all variables to a known value. */
967 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
968 	asoc->max_burst = inp->sctp_ep.max_burst;
969 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
970 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
971 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
972 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
973 	asoc->ecn_supported = inp->ecn_supported;
974 	asoc->prsctp_supported = inp->prsctp_supported;
975 	asoc->idata_supported = inp->idata_supported;
976 	asoc->auth_supported = inp->auth_supported;
977 	asoc->asconf_supported = inp->asconf_supported;
978 	asoc->reconfig_supported = inp->reconfig_supported;
979 	asoc->nrsack_supported = inp->nrsack_supported;
980 	asoc->pktdrop_supported = inp->pktdrop_supported;
981 	asoc->idata_supported = inp->idata_supported;
982 	asoc->sctp_cmt_pf = (uint8_t)0;
983 	asoc->sctp_frag_point = inp->sctp_frag_point;
984 	asoc->sctp_features = inp->sctp_features;
985 	asoc->default_dscp = inp->sctp_ep.default_dscp;
986 	asoc->max_cwnd = inp->max_cwnd;
987 #ifdef INET6
988 	if (inp->sctp_ep.default_flowlabel) {
989 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
990 	} else {
991 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
992 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
993 			asoc->default_flowlabel &= 0x000fffff;
994 			asoc->default_flowlabel |= 0x80000000;
995 		} else {
996 			asoc->default_flowlabel = 0;
997 		}
998 	}
999 #endif
1000 	asoc->sb_send_resv = 0;
1001 	if (override_tag) {
1002 		asoc->my_vtag = override_tag;
1003 	} else {
1004 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1005 	}
1006 	/* Get the nonce tags */
1007 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1008 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1009 	asoc->vrf_id = vrf_id;
1010 
1011 #ifdef SCTP_ASOCLOG_OF_TSNS
1012 	asoc->tsn_in_at = 0;
1013 	asoc->tsn_out_at = 0;
1014 	asoc->tsn_in_wrapped = 0;
1015 	asoc->tsn_out_wrapped = 0;
1016 	asoc->cumack_log_at = 0;
1017 	asoc->cumack_log_atsnt = 0;
1018 #endif
1019 #ifdef SCTP_FS_SPEC_LOG
1020 	asoc->fs_index = 0;
1021 #endif
1022 	asoc->refcnt = 0;
1023 	asoc->assoc_up_sent = 0;
1024 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1025 	    sctp_select_initial_TSN(&inp->sctp_ep);
1026 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1027 	/* we are optimisitic here */
1028 	asoc->peer_supports_nat = 0;
1029 	asoc->sent_queue_retran_cnt = 0;
1030 
1031 	/* for CMT */
1032 	asoc->last_net_cmt_send_started = NULL;
1033 
1034 	/* This will need to be adjusted */
1035 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1036 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1037 	asoc->asconf_seq_in = asoc->last_acked_seq;
1038 
1039 	/* here we are different, we hold the next one we expect */
1040 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1041 
1042 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1043 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1044 
1045 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1046 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1047 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1048 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1049 	asoc->free_chunk_cnt = 0;
1050 
1051 	asoc->iam_blocking = 0;
1052 	asoc->context = inp->sctp_context;
1053 	asoc->local_strreset_support = inp->local_strreset_support;
1054 	asoc->def_send = inp->def_send;
1055 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1056 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1057 	asoc->pr_sctp_cnt = 0;
1058 	asoc->total_output_queue_size = 0;
1059 
1060 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1061 		asoc->scope.ipv6_addr_legal = 1;
1062 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1063 			asoc->scope.ipv4_addr_legal = 1;
1064 		} else {
1065 			asoc->scope.ipv4_addr_legal = 0;
1066 		}
1067 	} else {
1068 		asoc->scope.ipv6_addr_legal = 0;
1069 		asoc->scope.ipv4_addr_legal = 1;
1070 	}
1071 
1072 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1073 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1074 
1075 	asoc->smallest_mtu = inp->sctp_frag_point;
1076 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1077 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1078 
1079 	asoc->stream_locked_on = 0;
1080 	asoc->ecn_echo_cnt_onq = 0;
1081 	asoc->stream_locked = 0;
1082 
1083 	asoc->send_sack = 1;
1084 
1085 	LIST_INIT(&asoc->sctp_restricted_addrs);
1086 
1087 	TAILQ_INIT(&asoc->nets);
1088 	TAILQ_INIT(&asoc->pending_reply_queue);
1089 	TAILQ_INIT(&asoc->asconf_ack_sent);
1090 	/* Setup to fill the hb random cache at first HB */
1091 	asoc->hb_random_idx = 4;
1092 
1093 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1094 
1095 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1096 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1097 
1098 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1099 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1100 
1101 	/*
1102 	 * Now the stream parameters, here we allocate space for all streams
1103 	 * that we request by default.
1104 	 */
1105 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1106 	    o_strms;
1107 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1108 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1109 	    SCTP_M_STRMO);
1110 	if (asoc->strmout == NULL) {
1111 		/* big trouble no memory */
1112 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1113 		return (ENOMEM);
1114 	}
1115 	for (i = 0; i < asoc->streamoutcnt; i++) {
1116 		/*
1117 		 * inbound side must be set to 0xffff, also NOTE when we get
1118 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1119 		 * count (streamoutcnt) but first check if we sent to any of
1120 		 * the upper streams that were dropped (if some were). Those
1121 		 * that were dropped must be notified to the upper layer as
1122 		 * failed to send.
1123 		 */
1124 		asoc->strmout[i].next_mid_ordered = 0;
1125 		asoc->strmout[i].next_mid_unordered = 0;
1126 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1127 		asoc->strmout[i].chunks_on_queues = 0;
1128 #if defined(SCTP_DETAILED_STR_STATS)
1129 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1130 			asoc->strmout[i].abandoned_sent[j] = 0;
1131 			asoc->strmout[i].abandoned_unsent[j] = 0;
1132 		}
1133 #else
1134 		asoc->strmout[i].abandoned_sent[0] = 0;
1135 		asoc->strmout[i].abandoned_unsent[0] = 0;
1136 #endif
1137 		asoc->strmout[i].sid = i;
1138 		asoc->strmout[i].last_msg_incomplete = 0;
1139 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1140 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1141 	}
1142 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1143 
1144 	/* Now the mapping array */
1145 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1146 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1147 	    SCTP_M_MAP);
1148 	if (asoc->mapping_array == NULL) {
1149 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1150 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1151 		return (ENOMEM);
1152 	}
1153 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1154 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1155 	    SCTP_M_MAP);
1156 	if (asoc->nr_mapping_array == NULL) {
1157 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1158 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1159 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1160 		return (ENOMEM);
1161 	}
1162 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1163 
1164 	/* Now the init of the other outqueues */
1165 	TAILQ_INIT(&asoc->free_chunks);
1166 	TAILQ_INIT(&asoc->control_send_queue);
1167 	TAILQ_INIT(&asoc->asconf_send_queue);
1168 	TAILQ_INIT(&asoc->send_queue);
1169 	TAILQ_INIT(&asoc->sent_queue);
1170 	TAILQ_INIT(&asoc->resetHead);
1171 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1172 	TAILQ_INIT(&asoc->asconf_queue);
1173 	/* authentication fields */
1174 	asoc->authinfo.random = NULL;
1175 	asoc->authinfo.active_keyid = 0;
1176 	asoc->authinfo.assoc_key = NULL;
1177 	asoc->authinfo.assoc_keyid = 0;
1178 	asoc->authinfo.recv_key = NULL;
1179 	asoc->authinfo.recv_keyid = 0;
1180 	LIST_INIT(&asoc->shared_keys);
1181 	asoc->marked_retrans = 0;
1182 	asoc->port = inp->sctp_ep.port;
1183 	asoc->timoinit = 0;
1184 	asoc->timodata = 0;
1185 	asoc->timosack = 0;
1186 	asoc->timoshutdown = 0;
1187 	asoc->timoheartbeat = 0;
1188 	asoc->timocookie = 0;
1189 	asoc->timoshutdownack = 0;
1190 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1191 	asoc->discontinuity_time = asoc->start_time;
1192 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1193 		asoc->abandoned_unsent[i] = 0;
1194 		asoc->abandoned_sent[i] = 0;
1195 	}
1196 	/*
1197 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1198 	 * freed later when the association is freed.
1199 	 */
1200 	return (0);
1201 }
1202 
1203 void
1204 sctp_print_mapping_array(struct sctp_association *asoc)
1205 {
1206 	unsigned int i, limit;
1207 
1208 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1209 	    asoc->mapping_array_size,
1210 	    asoc->mapping_array_base_tsn,
1211 	    asoc->cumulative_tsn,
1212 	    asoc->highest_tsn_inside_map,
1213 	    asoc->highest_tsn_inside_nr_map);
1214 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1215 		if (asoc->mapping_array[limit - 1] != 0) {
1216 			break;
1217 		}
1218 	}
1219 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1220 	for (i = 0; i < limit; i++) {
1221 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1222 	}
1223 	if (limit % 16)
1224 		SCTP_PRINTF("\n");
1225 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1226 		if (asoc->nr_mapping_array[limit - 1]) {
1227 			break;
1228 		}
1229 	}
1230 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1231 	for (i = 0; i < limit; i++) {
1232 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1233 	}
1234 	if (limit % 16)
1235 		SCTP_PRINTF("\n");
1236 }
1237 
1238 int
1239 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1240 {
1241 	/* mapping array needs to grow */
1242 	uint8_t *new_array1, *new_array2;
1243 	uint32_t new_size;
1244 
1245 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1246 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1247 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1248 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1249 		/* can't get more, forget it */
1250 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1251 		if (new_array1) {
1252 			SCTP_FREE(new_array1, SCTP_M_MAP);
1253 		}
1254 		if (new_array2) {
1255 			SCTP_FREE(new_array2, SCTP_M_MAP);
1256 		}
1257 		return (-1);
1258 	}
1259 	memset(new_array1, 0, new_size);
1260 	memset(new_array2, 0, new_size);
1261 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1262 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1263 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1264 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1265 	asoc->mapping_array = new_array1;
1266 	asoc->nr_mapping_array = new_array2;
1267 	asoc->mapping_array_size = new_size;
1268 	return (0);
1269 }
1270 
1271 
/*
 * Core of the SCTP association iterator.  Walks endpoints (inps) and,
 * within each matching endpoint, every association (stcb) whose state
 * matches it->asoc_state, invoking the caller-supplied callbacks:
 * function_inp (per endpoint, may request a skip), function_assoc (per
 * association), function_inp_end (after an endpoint's associations), and
 * function_atend (once, at completion).  Runs with the INP-INFO read
 * lock and the iterator lock held; both are periodically dropped (after
 * SCTP_ITERATOR_MAX_AT_ONCE associations) to let other threads in, and
 * the iterator control flags are re-checked afterwards in case a stop
 * was requested while the locks were released.  The iterator structure
 * itself is freed here when the walk finishes.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		/* Trade the caller's reference for an INP read lock. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* The first endpoint was already locked above. */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Hold a refcnt on the tcb and a ref on the inp so
			 * neither can go away while all locks are dropped.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-acquire in lock order, then drop the holds. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1421 
1422 void
1423 sctp_iterator_worker(void)
1424 {
1425 	struct sctp_iterator *it, *nit;
1426 
1427 	/* This function is called with the WQ lock in place */
1428 
1429 	sctp_it_ctl.iterator_running = 1;
1430 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1431 		/* now lets work on this one */
1432 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1433 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1434 		CURVNET_SET(it->vn);
1435 		sctp_iterator_work(it);
1436 		CURVNET_RESTORE();
1437 		SCTP_IPI_ITERATOR_WQ_LOCK();
1438 		/* sa_ignore FREED_MEMORY */
1439 	}
1440 	sctp_it_ctl.iterator_running = 0;
1441 	return;
1442 }
1443 
1444 
/*
 * Service the address work queue populated by routing-socket address
 * change events.  Moves all queued sctp_laddr entries onto a freshly
 * allocated asconf iterator work list and kicks off an iterator over
 * all bound-all endpoints to process them.  On allocation failure the
 * ADDR_WQ timer is restarted so the work is retried later; if the
 * iterator cannot be started, the entries are either finalized (when
 * the stack is shutting down) or pushed back onto the global queue.
 */
static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	/* Atomically transfer every queued entry to our private list. */
	SCTP_WQ_ADDR_LOCK();
	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}
	SCTP_WQ_ADDR_UNLOCK();

	if (asc->cnt == 0) {
		/* Nothing was queued after all; drop the iterator. */
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		int ret;

		/* Ownership of asc passes to the iterator on success. */
		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL,	/* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
		if (ret) {
			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
			/*
			 * Freeing if we are stopping or put back on the
			 * addr_wq.
			 */
			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
				sctp_asconf_iterator_end(asc, 0);
			} else {
				SCTP_WQ_ADDR_LOCK();
				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
				}
				SCTP_WQ_ADDR_UNLOCK();
				SCTP_FREE(asc, SCTP_M_ASC_IT);
			}
		}
	}
}
1505 
/*
 * Common callout handler for all SCTP timers.  The sctp_timer embedded
 * in the callout identifies its type and the endpoint (inp),
 * association (stcb), and destination (net) it applies to.  A series of
 * sanity checks first discards stale timers (self-pointer mismatch,
 * invalid type, missing endpoint, dead association, rescheduled or
 * inactive callout); references taken on the inp and on the tcb refcnt
 * during validation are released on every exit path (get_out /
 * out_decr / out_no_decr).  The switch then dispatches to the per-type
 * timer routine; a non-zero return from those routines means the tcb
 * has been freed, so the code jumps straight to out_decr without
 * unlocking it.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif
	int did_output;
	int type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Every timer type except ADDR_WQ requires an endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			/*
			 * Socket is gone and this timer type may not run
			 * on a socket-less endpoint; drop our reference.
			 */
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the association while we validate it. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* Latch the type; used for logging and dispatch below. */
	type = tmr->type;
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			/* Association is being torn down; bail out. */
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/* Only heartbeat requires a valid net as well. */
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			/* Re-arm and send unless HB is disabled on this path. */
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret keys. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Guard expired: abort the association outright. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Shutdown guard timer expired");
		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Time to free this association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t)type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
	CURVNET_RESTORE();
}
1933 
1934 void
1935 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1936     struct sctp_nets *net)
1937 {
1938 	uint32_t to_ticks;
1939 	struct sctp_timer *tmr;
1940 
1941 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1942 		return;
1943 
1944 	tmr = NULL;
1945 	if (stcb) {
1946 		SCTP_TCB_LOCK_ASSERT(stcb);
1947 	}
1948 	switch (t_type) {
1949 	case SCTP_TIMER_TYPE_ADDR_WQ:
1950 		/* Only 1 tick away :-) */
1951 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1952 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1953 		break;
1954 	case SCTP_TIMER_TYPE_SEND:
1955 		/* Here we use the RTO timer */
1956 		{
1957 			int rto_val;
1958 
1959 			if ((stcb == NULL) || (net == NULL)) {
1960 				return;
1961 			}
1962 			tmr = &net->rxt_timer;
1963 			if (net->RTO == 0) {
1964 				rto_val = stcb->asoc.initial_rto;
1965 			} else {
1966 				rto_val = net->RTO;
1967 			}
1968 			to_ticks = MSEC_TO_TICKS(rto_val);
1969 		}
1970 		break;
1971 	case SCTP_TIMER_TYPE_INIT:
1972 		/*
1973 		 * Here we use the INIT timer default usually about 1
1974 		 * minute.
1975 		 */
1976 		if ((stcb == NULL) || (net == NULL)) {
1977 			return;
1978 		}
1979 		tmr = &net->rxt_timer;
1980 		if (net->RTO == 0) {
1981 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1982 		} else {
1983 			to_ticks = MSEC_TO_TICKS(net->RTO);
1984 		}
1985 		break;
1986 	case SCTP_TIMER_TYPE_RECV:
1987 		/*
1988 		 * Here we use the Delayed-Ack timer value from the inp
1989 		 * ususually about 200ms.
1990 		 */
1991 		if (stcb == NULL) {
1992 			return;
1993 		}
1994 		tmr = &stcb->asoc.dack_timer;
1995 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1996 		break;
1997 	case SCTP_TIMER_TYPE_SHUTDOWN:
1998 		/* Here we use the RTO of the destination. */
1999 		if ((stcb == NULL) || (net == NULL)) {
2000 			return;
2001 		}
2002 		if (net->RTO == 0) {
2003 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2004 		} else {
2005 			to_ticks = MSEC_TO_TICKS(net->RTO);
2006 		}
2007 		tmr = &net->rxt_timer;
2008 		break;
2009 	case SCTP_TIMER_TYPE_HEARTBEAT:
2010 		/*
2011 		 * the net is used here so that we can add in the RTO. Even
2012 		 * though we use a different timer. We also add the HB timer
2013 		 * PLUS a random jitter.
2014 		 */
2015 		if ((stcb == NULL) || (net == NULL)) {
2016 			return;
2017 		} else {
2018 			uint32_t rndval;
2019 			uint32_t jitter;
2020 
2021 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2022 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2023 				return;
2024 			}
2025 			if (net->RTO == 0) {
2026 				to_ticks = stcb->asoc.initial_rto;
2027 			} else {
2028 				to_ticks = net->RTO;
2029 			}
2030 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2031 			jitter = rndval % to_ticks;
2032 			if (jitter >= (to_ticks >> 1)) {
2033 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2034 			} else {
2035 				to_ticks = to_ticks - jitter;
2036 			}
2037 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2038 			    !(net->dest_state & SCTP_ADDR_PF)) {
2039 				to_ticks += net->heart_beat_delay;
2040 			}
2041 			/*
2042 			 * Now we must convert the to_ticks that are now in
2043 			 * ms to ticks.
2044 			 */
2045 			to_ticks = MSEC_TO_TICKS(to_ticks);
2046 			tmr = &net->hb_timer;
2047 		}
2048 		break;
2049 	case SCTP_TIMER_TYPE_COOKIE:
2050 		/*
2051 		 * Here we can use the RTO timer from the network since one
2052 		 * RTT was compelete. If a retran happened then we will be
2053 		 * using the RTO initial value.
2054 		 */
2055 		if ((stcb == NULL) || (net == NULL)) {
2056 			return;
2057 		}
2058 		if (net->RTO == 0) {
2059 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2060 		} else {
2061 			to_ticks = MSEC_TO_TICKS(net->RTO);
2062 		}
2063 		tmr = &net->rxt_timer;
2064 		break;
2065 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2066 		/*
2067 		 * nothing needed but the endpoint here ususually about 60
2068 		 * minutes.
2069 		 */
2070 		tmr = &inp->sctp_ep.signature_change;
2071 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2072 		break;
2073 	case SCTP_TIMER_TYPE_ASOCKILL:
2074 		if (stcb == NULL) {
2075 			return;
2076 		}
2077 		tmr = &stcb->asoc.strreset_timer;
2078 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2079 		break;
2080 	case SCTP_TIMER_TYPE_INPKILL:
2081 		/*
2082 		 * The inp is setup to die. We re-use the signature_chage
2083 		 * timer since that has stopped and we are in the GONE
2084 		 * state.
2085 		 */
2086 		tmr = &inp->sctp_ep.signature_change;
2087 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2088 		break;
2089 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2090 		/*
2091 		 * Here we use the value found in the EP for PMTU ususually
2092 		 * about 10 minutes.
2093 		 */
2094 		if ((stcb == NULL) || (net == NULL)) {
2095 			return;
2096 		}
2097 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2098 			return;
2099 		}
2100 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2101 		tmr = &net->pmtu_timer;
2102 		break;
2103 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2104 		/* Here we use the RTO of the destination */
2105 		if ((stcb == NULL) || (net == NULL)) {
2106 			return;
2107 		}
2108 		if (net->RTO == 0) {
2109 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2110 		} else {
2111 			to_ticks = MSEC_TO_TICKS(net->RTO);
2112 		}
2113 		tmr = &net->rxt_timer;
2114 		break;
2115 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2116 		/*
2117 		 * Here we use the endpoints shutdown guard timer usually
2118 		 * about 3 minutes.
2119 		 */
2120 		if (stcb == NULL) {
2121 			return;
2122 		}
2123 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2124 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2125 		} else {
2126 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2127 		}
2128 		tmr = &stcb->asoc.shut_guard_timer;
2129 		break;
2130 	case SCTP_TIMER_TYPE_STRRESET:
2131 		/*
2132 		 * Here the timer comes from the stcb but its value is from
2133 		 * the net's RTO.
2134 		 */
2135 		if ((stcb == NULL) || (net == NULL)) {
2136 			return;
2137 		}
2138 		if (net->RTO == 0) {
2139 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2140 		} else {
2141 			to_ticks = MSEC_TO_TICKS(net->RTO);
2142 		}
2143 		tmr = &stcb->asoc.strreset_timer;
2144 		break;
2145 	case SCTP_TIMER_TYPE_ASCONF:
2146 		/*
2147 		 * Here the timer comes from the stcb but its value is from
2148 		 * the net's RTO.
2149 		 */
2150 		if ((stcb == NULL) || (net == NULL)) {
2151 			return;
2152 		}
2153 		if (net->RTO == 0) {
2154 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2155 		} else {
2156 			to_ticks = MSEC_TO_TICKS(net->RTO);
2157 		}
2158 		tmr = &stcb->asoc.asconf_timer;
2159 		break;
2160 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2161 		if ((stcb == NULL) || (net != NULL)) {
2162 			return;
2163 		}
2164 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2165 		tmr = &stcb->asoc.delete_prim_timer;
2166 		break;
2167 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2168 		if (stcb == NULL) {
2169 			return;
2170 		}
2171 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2172 			/*
2173 			 * Really an error since stcb is NOT set to
2174 			 * autoclose
2175 			 */
2176 			return;
2177 		}
2178 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2179 		tmr = &stcb->asoc.autoclose_timer;
2180 		break;
2181 	default:
2182 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2183 		    __func__, t_type);
2184 		return;
2185 		break;
2186 	}
2187 	if ((to_ticks <= 0) || (tmr == NULL)) {
2188 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2189 		    __func__, t_type, to_ticks, (void *)tmr);
2190 		return;
2191 	}
2192 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2193 		/*
2194 		 * we do NOT allow you to have it already running. if it is
2195 		 * we leave the current one up unchanged
2196 		 */
2197 		return;
2198 	}
2199 	/* At this point we can proceed */
2200 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2201 		stcb->asoc.num_send_timers_up++;
2202 	}
2203 	tmr->stopped_from = 0;
2204 	tmr->type = t_type;
2205 	tmr->ep = (void *)inp;
2206 	tmr->tcb = (void *)stcb;
2207 	tmr->net = (void *)net;
2208 	tmr->self = (void *)tmr;
2209 	tmr->vnet = (void *)curvnet;
2210 	tmr->ticks = sctp_get_tick_count();
2211 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2212 	return;
2213 }
2214 
/*
 * Stop the SCTP timer of the given type for the given endpoint (inp),
 * association (stcb), and/or destination (net).  'from' records the
 * caller's location for post-mortem debugging (stored in the timer).
 * Some sctp_timer objects are shared between timer types; a stop request
 * for a type that is not the one currently armed is silently ignored.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may be stopped without an inp. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Map the timer type onto the sctp_timer object that backs it. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __func__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the count of pending SEND timers in sync (never below 0). */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	/* Clearing self tells a concurrently firing handler to bail out. */
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2365 
2366 uint32_t
2367 sctp_calculate_len(struct mbuf *m)
2368 {
2369 	uint32_t tlen = 0;
2370 	struct mbuf *at;
2371 
2372 	at = m;
2373 	while (at) {
2374 		tlen += SCTP_BUF_LEN(at);
2375 		at = SCTP_BUF_NEXT(at);
2376 	}
2377 	return (tlen);
2378 }
2379 
2380 void
2381 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2382     struct sctp_association *asoc, uint32_t mtu)
2383 {
2384 	/*
2385 	 * Reset the P-MTU size on this association, this involves changing
2386 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2387 	 * allow the DF flag to be cleared.
2388 	 */
2389 	struct sctp_tmit_chunk *chk;
2390 	unsigned int eff_mtu, ovh;
2391 
2392 	asoc->smallest_mtu = mtu;
2393 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2394 		ovh = SCTP_MIN_OVERHEAD;
2395 	} else {
2396 		ovh = SCTP_MIN_V4_OVERHEAD;
2397 	}
2398 	eff_mtu = mtu - ovh;
2399 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2400 		if (chk->send_size > eff_mtu) {
2401 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2402 		}
2403 	}
2404 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2405 		if (chk->send_size > eff_mtu) {
2406 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2407 		}
2408 	}
2409 }
2410 
2411 
2412 /*
2413  * given an association and starting time of the current RTT period return
2414  * RTO in number of msecs net should point to the current network
2415  */
2416 
2417 uint32_t
2418 sctp_calculate_rto(struct sctp_tcb *stcb,
2419     struct sctp_association *asoc,
2420     struct sctp_nets *net,
2421     struct timeval *told,
2422     int safe, int rtt_from_sack)
2423 {
2424 	/*-
2425 	 * given an association and the starting time of the current RTT
2426 	 * period (in value1/value2) return RTO in number of msecs.
2427 	 */
2428 	int32_t rtt;		/* RTT in ms */
2429 	uint32_t new_rto;
2430 	int first_measure = 0;
2431 	struct timeval now, then, *old;
2432 
2433 	/* Copy it out for sparc64 */
2434 	if (safe == sctp_align_unsafe_makecopy) {
2435 		old = &then;
2436 		memcpy(&then, told, sizeof(struct timeval));
2437 	} else if (safe == sctp_align_safe_nocopy) {
2438 		old = told;
2439 	} else {
2440 		/* error */
2441 		SCTP_PRINTF("Huh, bad rto calc call\n");
2442 		return (0);
2443 	}
2444 	/************************/
2445 	/* 1. calculate new RTT */
2446 	/************************/
2447 	/* get the current time */
2448 	if (stcb->asoc.use_precise_time) {
2449 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2450 	} else {
2451 		(void)SCTP_GETTIME_TIMEVAL(&now);
2452 	}
2453 	timevalsub(&now, old);
2454 	/* store the current RTT in us */
2455 	net->rtt = (uint64_t)1000000 *(uint64_t)now.tv_sec +
2456 	        (uint64_t)now.tv_usec;
2457 
2458 	/* compute rtt in ms */
2459 	rtt = (int32_t)(net->rtt / 1000);
2460 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2461 		/*
2462 		 * Tell the CC module that a new update has just occurred
2463 		 * from a sack
2464 		 */
2465 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2466 	}
2467 	/*
2468 	 * Do we need to determine the lan? We do this only on sacks i.e.
2469 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2470 	 */
2471 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2472 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2473 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2474 			net->lan_type = SCTP_LAN_INTERNET;
2475 		} else {
2476 			net->lan_type = SCTP_LAN_LOCAL;
2477 		}
2478 	}
2479 	/***************************/
2480 	/* 2. update RTTVAR & SRTT */
2481 	/***************************/
2482 	/*-
2483 	 * Compute the scaled average lastsa and the
2484 	 * scaled variance lastsv as described in van Jacobson
2485 	 * Paper "Congestion Avoidance and Control", Annex A.
2486 	 *
2487 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2488 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2489 	 */
2490 	if (net->RTO_measured) {
2491 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2492 		net->lastsa += rtt;
2493 		if (rtt < 0) {
2494 			rtt = -rtt;
2495 		}
2496 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2497 		net->lastsv += rtt;
2498 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2499 			rto_logging(net, SCTP_LOG_RTTVAR);
2500 		}
2501 	} else {
2502 		/* First RTO measurment */
2503 		net->RTO_measured = 1;
2504 		first_measure = 1;
2505 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2506 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2507 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2508 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2509 		}
2510 	}
2511 	if (net->lastsv == 0) {
2512 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2513 	}
2514 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2515 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2516 	    (stcb->asoc.sat_network_lockout == 0)) {
2517 		stcb->asoc.sat_network = 1;
2518 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2519 		stcb->asoc.sat_network = 0;
2520 		stcb->asoc.sat_network_lockout = 1;
2521 	}
2522 	/* bound it, per C6/C7 in Section 5.3.1 */
2523 	if (new_rto < stcb->asoc.minrto) {
2524 		new_rto = stcb->asoc.minrto;
2525 	}
2526 	if (new_rto > stcb->asoc.maxrto) {
2527 		new_rto = stcb->asoc.maxrto;
2528 	}
2529 	/* we are now returning the RTO */
2530 	return (new_rto);
2531 }
2532 
2533 /*
2534  * return a pointer to a contiguous piece of data from the given mbuf chain
2535  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2536  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2537  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2538  */
caddr_t
sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
{
	uint32_t count;
	uint8_t *ptr;

	ptr = in_ptr;
	/* Reject nonsensical requests up front. */
	if ((off < 0) || (len <= 0))
		return (NULL);

	/* find the desired start location */
	while ((m != NULL) && (off > 0)) {
		if (off < SCTP_BUF_LEN(m))
			break;
		off -= SCTP_BUF_LEN(m);
		m = SCTP_BUF_NEXT(m);
	}
	/* Chain ended before reaching 'off'. */
	if (m == NULL)
		return (NULL);

	/* is the current mbuf large enough (eg. contiguous)? */
	if ((SCTP_BUF_LEN(m) - off) >= len) {
		/* Fast path: return a pointer directly into the mbuf. */
		return (mtod(m, caddr_t)+off);
	} else {
		/* else, it spans more than one mbuf, so save a temp copy... */
		while ((m != NULL) && (len > 0)) {
			count = min(SCTP_BUF_LEN(m) - off, len);
			memcpy(ptr, mtod(m, caddr_t)+off, count);
			len -= count;
			ptr += count;
			/* only the first mbuf is read from a non-zero offset */
			off = 0;
			m = SCTP_BUF_NEXT(m);
		}
		/* Chain too short to supply 'len' bytes. */
		if ((m == NULL) && (len > 0))
			return (NULL);
		else
			return ((caddr_t)in_ptr);
	}
}
2578 
2579 
2580 
2581 struct sctp_paramhdr *
2582 sctp_get_next_param(struct mbuf *m,
2583     int offset,
2584     struct sctp_paramhdr *pull,
2585     int pull_limit)
2586 {
2587 	/* This just provides a typed signature to Peter's Pull routine */
2588 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2589 	    (uint8_t *)pull));
2590 }
2591 
2592 
2593 struct mbuf *
2594 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2595 {
2596 	struct mbuf *m_last;
2597 	caddr_t dp;
2598 
2599 	if (padlen > 3) {
2600 		return (NULL);
2601 	}
2602 	if (padlen <= M_TRAILINGSPACE(m)) {
2603 		/*
2604 		 * The easy way. We hope the majority of the time we hit
2605 		 * here :)
2606 		 */
2607 		m_last = m;
2608 	} else {
2609 		/* Hard way we must grow the mbuf chain */
2610 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2611 		if (m_last == NULL) {
2612 			return (NULL);
2613 		}
2614 		SCTP_BUF_LEN(m_last) = 0;
2615 		SCTP_BUF_NEXT(m_last) = NULL;
2616 		SCTP_BUF_NEXT(m) = m_last;
2617 	}
2618 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2619 	SCTP_BUF_LEN(m_last) += padlen;
2620 	memset(dp, 0, padlen);
2621 	return (m_last);
2622 }
2623 
2624 struct mbuf *
2625 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2626 {
2627 	/* find the last mbuf in chain and pad it */
2628 	struct mbuf *m_at;
2629 
2630 	if (last_mbuf != NULL) {
2631 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2632 	} else {
2633 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2634 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2635 				return (sctp_add_pad_tombuf(m_at, padval));
2636 			}
2637 		}
2638 	}
2639 	return (NULL);
2640 }
2641 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification for 'state' (COMM_UP,
 * COMM_LOST, RESTART, SHUTDOWN_COMP, CANT_STR_ASSOC) to the socket's
 * receive queue, if the application enabled that event.  For 1-to-1
 * style sockets a COMM_LOST/CANT_STR_ASSOC also sets so_error, and all
 * sleepers on the socket are woken.  'abort' (may be NULL) is the ABORT
 * chunk to append to the notification; 'from_peer' selects the errno
 * reported for 1-to-1 sockets.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t abort_len;
	unsigned int i;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
		} else {
			abort_len = 0;
		}
		/* Size the notification for optional trailing info. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		/* Fill sac_info only when the larger allocation succeeded. */
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				/* Advertise the features negotiated for this assoc. */
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				if (stcb->asoc.idata_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				/* Append the received ABORT chunk verbatim. */
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			control->spec_flags = M_NOTIFICATION;
			/* not that we need this */
			control->tail_mbuf = m_notify;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up and error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
		SOCK_UNLOCK(stcb->sctp_socket);
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* Take the socket lock; ref keeps the stcb alive meanwhile. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2795 
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification for the peer address 'sa'
 * (new state in 'state', error cause in 'error') to the socket's receive
 * queue, if the application enabled SCTP_PCB_FLAGS_RECVPADDREVNT.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address out in the form the application expects. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		/* Dual-stack sockets may want the v4 address v6-mapped. */
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2887 
2888 
/*
 * Deliver an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification for
 * the chunk 'chk' that could not be (fully) delivered.  'sent' selects
 * SCTP_DATA_SENT vs SCTP_DATA_UNSENT; 'error' is the cause code.  The
 * chunk's data mbufs are stolen (chk->data is cleared) and appended to
 * the notification after stripping the DATA/I-DATA chunk header.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	struct sctp_chunkhdr *chkhdr;
	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* The newer event format takes precedence when enabled. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (stcb->asoc.idata_supported) {
		chkhdr_len = sizeof(struct sctp_idata_chunk);
	} else {
		chkhdr_len = sizeof(struct sctp_data_chunk);
	}
	/* Use some defaults in case we can't access the chunk header */
	if (chk->send_size >= chkhdr_len) {
		payload_len = chk->send_size - chkhdr_len;
	} else {
		payload_len = 0;
	}
	padding_len = 0;
	if (chk->data != NULL) {
		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
		if (chkhdr != NULL) {
			chk_len = ntohs(chkhdr->chunk_length);
			/* Derive the real padding from the on-wire chunk length. */
			if ((chk_len >= chkhdr_len) &&
			    (chk->send_size >= chk_len) &&
			    (chk->send_size - chk_len < 4)) {
				padding_len = chk->send_size - chk_len;
				payload_len = chk->send_size - chkhdr_len - padding_len;
			}
		}
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		/* Legacy sctp_send_failed layout. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	if (chk->data != NULL) {
		/* Trim off the sctp chunk header (it should be there) */
		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
			m_adj(chk->data, chkhdr_len);
			m_adj(chk->data, -padding_len);
			sctp_mbuf_crush(chk->data);
			chk->send_size -= (chkhdr_len + padding_len);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3022 
3023 
/*
 * Queue an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification for a
 * message that never left the stream send queue (it was never chunked),
 * identified by 'sp'.  The pending user data mbufs are stolen from 'sp'
 * and appended to the notification, which is then placed on the socket
 * receive buffer.  'error' is reported to the application unchanged.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int notifhdr_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* Pick the notification format the application subscribed to. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* Newer sctp_send_failed_event format. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		/* The message never made it onto the wire: always "unsent". */
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->sid;
		if (sp->some_taken) {
			/* Part of the message was already chunked off. */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		/* Legacy sctp_send_failed format. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->sid;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	/* Chain the unsent user data behind the notification header. */
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* Frees the notification header and the stolen data chain. */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3123 
3124 
3125 
3126 static void
3127 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3128 {
3129 	struct mbuf *m_notify;
3130 	struct sctp_adaptation_event *sai;
3131 	struct sctp_queued_to_read *control;
3132 
3133 	if ((stcb == NULL) ||
3134 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3135 		/* event not enabled */
3136 		return;
3137 	}
3138 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3139 	if (m_notify == NULL)
3140 		/* no space left */
3141 		return;
3142 	SCTP_BUF_LEN(m_notify) = 0;
3143 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3144 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3145 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3146 	sai->sai_flags = 0;
3147 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3148 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3149 	sai->sai_assoc_id = sctp_get_associd(stcb);
3150 
3151 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3152 	SCTP_BUF_NEXT(m_notify) = NULL;
3153 
3154 	/* append to socket */
3155 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3156 	    0, 0, stcb->asoc.context, 0, 0, 0,
3157 	    m_notify);
3158 	if (control == NULL) {
3159 		/* no memory */
3160 		sctp_m_freem(m_notify);
3161 		return;
3162 	}
3163 	control->length = SCTP_BUF_LEN(m_notify);
3164 	control->spec_flags = M_NOTIFICATION;
3165 	/* not that we need this */
3166 	control->tail_mbuf = m_notify;
3167 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3168 	    control,
3169 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3170 }
3171 
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT notification.  'val' packs the
 * stream id into its upper 16 bits and the sequence number into its lower
 * 16 bits.  This always must be called with the read-queue LOCKED in the
 * INP, because the entry is linked into the inp read_queue directly
 * (right behind asoc.control_pdapi when a partial delivery is in flight).
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* nobody can read the notification anymore */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* Unpack stream id / sequence number from the packed argument. */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* Charge the notification against the socket receive buffer. */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	control->end_added = 1;
	if (stcb->asoc.control_pdapi)
		/* Deliver right behind the partially-delivered message. */
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * These platforms need the socket lock for the wakeup:
		 * drop the TCB lock first (lock order) while holding a
		 * refcount so the assoc cannot disappear meanwhile.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* The socket went away while we slept. */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3264 
/*
 * Deliver an SCTP_SHUTDOWN_EVENT notification.  For TCP-model (and
 * connected one-to-many) sockets this additionally marks the socket as
 * unable to send further data, since the peer started an orderly shutdown.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Acquire the socket lock, dropping the TCB lock first
		 * (lock order) while a refcount keeps the assoc alive.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* The socket was closed while we slept. */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3333 
3334 static void
3335 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3336     int so_locked
3337 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3338     SCTP_UNUSED
3339 #endif
3340 )
3341 {
3342 	struct mbuf *m_notify;
3343 	struct sctp_sender_dry_event *event;
3344 	struct sctp_queued_to_read *control;
3345 
3346 	if ((stcb == NULL) ||
3347 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3348 		/* event not enabled */
3349 		return;
3350 	}
3351 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3352 	if (m_notify == NULL) {
3353 		/* no space left */
3354 		return;
3355 	}
3356 	SCTP_BUF_LEN(m_notify) = 0;
3357 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3358 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3359 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3360 	event->sender_dry_flags = 0;
3361 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3362 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3363 
3364 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3365 	SCTP_BUF_NEXT(m_notify) = NULL;
3366 
3367 	/* append to socket */
3368 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3369 	    0, 0, stcb->asoc.context, 0, 0, 0,
3370 	    m_notify);
3371 	if (control == NULL) {
3372 		/* no memory */
3373 		sctp_m_freem(m_notify);
3374 		return;
3375 	}
3376 	control->length = SCTP_BUF_LEN(m_notify);
3377 	control->spec_flags = M_NOTIFICATION;
3378 	/* not that we need this */
3379 	control->tail_mbuf = m_notify;
3380 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3381 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3382 }
3383 
3384 
3385 void
3386 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3387 {
3388 	struct mbuf *m_notify;
3389 	struct sctp_queued_to_read *control;
3390 	struct sctp_stream_change_event *stradd;
3391 
3392 	if ((stcb == NULL) ||
3393 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3394 		/* event not enabled */
3395 		return;
3396 	}
3397 	if ((stcb->asoc.peer_req_out) && flag) {
3398 		/* Peer made the request, don't tell the local user */
3399 		stcb->asoc.peer_req_out = 0;
3400 		return;
3401 	}
3402 	stcb->asoc.peer_req_out = 0;
3403 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3404 	if (m_notify == NULL)
3405 		/* no space left */
3406 		return;
3407 	SCTP_BUF_LEN(m_notify) = 0;
3408 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3409 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3410 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3411 	stradd->strchange_flags = flag;
3412 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3413 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3414 	stradd->strchange_instrms = numberin;
3415 	stradd->strchange_outstrms = numberout;
3416 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3417 	SCTP_BUF_NEXT(m_notify) = NULL;
3418 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3419 		/* no space */
3420 		sctp_m_freem(m_notify);
3421 		return;
3422 	}
3423 	/* append to socket */
3424 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3425 	    0, 0, stcb->asoc.context, 0, 0, 0,
3426 	    m_notify);
3427 	if (control == NULL) {
3428 		/* no memory */
3429 		sctp_m_freem(m_notify);
3430 		return;
3431 	}
3432 	control->length = SCTP_BUF_LEN(m_notify);
3433 	control->spec_flags = M_NOTIFICATION;
3434 	/* not that we need this */
3435 	control->tail_mbuf = m_notify;
3436 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3437 	    control,
3438 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3439 }
3440 
3441 void
3442 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3443 {
3444 	struct mbuf *m_notify;
3445 	struct sctp_queued_to_read *control;
3446 	struct sctp_assoc_reset_event *strasoc;
3447 
3448 	if ((stcb == NULL) ||
3449 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3450 		/* event not enabled */
3451 		return;
3452 	}
3453 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3454 	if (m_notify == NULL)
3455 		/* no space left */
3456 		return;
3457 	SCTP_BUF_LEN(m_notify) = 0;
3458 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3459 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3460 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3461 	strasoc->assocreset_flags = flag;
3462 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3463 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3464 	strasoc->assocreset_local_tsn = sending_tsn;
3465 	strasoc->assocreset_remote_tsn = recv_tsn;
3466 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3467 	SCTP_BUF_NEXT(m_notify) = NULL;
3468 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3469 		/* no space */
3470 		sctp_m_freem(m_notify);
3471 		return;
3472 	}
3473 	/* append to socket */
3474 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3475 	    0, 0, stcb->asoc.context, 0, 0, 0,
3476 	    m_notify);
3477 	if (control == NULL) {
3478 		/* no memory */
3479 		sctp_m_freem(m_notify);
3480 		return;
3481 	}
3482 	control->length = SCTP_BUF_LEN(m_notify);
3483 	control->spec_flags = M_NOTIFICATION;
3484 	/* not that we need this */
3485 	control->tail_mbuf = m_notify;
3486 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3487 	    control,
3488 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3489 }
3490 
3491 
3492 
3493 static void
3494 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3495     int number_entries, uint16_t *list, int flag)
3496 {
3497 	struct mbuf *m_notify;
3498 	struct sctp_queued_to_read *control;
3499 	struct sctp_stream_reset_event *strreset;
3500 	int len;
3501 
3502 	if ((stcb == NULL) ||
3503 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3504 		/* event not enabled */
3505 		return;
3506 	}
3507 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3508 	if (m_notify == NULL)
3509 		/* no space left */
3510 		return;
3511 	SCTP_BUF_LEN(m_notify) = 0;
3512 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3513 	if (len > M_TRAILINGSPACE(m_notify)) {
3514 		/* never enough room */
3515 		sctp_m_freem(m_notify);
3516 		return;
3517 	}
3518 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3519 	memset(strreset, 0, len);
3520 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3521 	strreset->strreset_flags = flag;
3522 	strreset->strreset_length = len;
3523 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3524 	if (number_entries) {
3525 		int i;
3526 
3527 		for (i = 0; i < number_entries; i++) {
3528 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3529 		}
3530 	}
3531 	SCTP_BUF_LEN(m_notify) = len;
3532 	SCTP_BUF_NEXT(m_notify) = NULL;
3533 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3534 		/* no space */
3535 		sctp_m_freem(m_notify);
3536 		return;
3537 	}
3538 	/* append to socket */
3539 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3540 	    0, 0, stcb->asoc.context, 0, 0, 0,
3541 	    m_notify);
3542 	if (control == NULL) {
3543 		/* no memory */
3544 		sctp_m_freem(m_notify);
3545 		return;
3546 	}
3547 	control->length = SCTP_BUF_LEN(m_notify);
3548 	control->spec_flags = M_NOTIFICATION;
3549 	/* not that we need this */
3550 	control->tail_mbuf = m_notify;
3551 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3552 	    control,
3553 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3554 }
3555 
3556 
/*
 * Deliver an SCTP_REMOTE_ERROR notification carrying the peer's ERROR
 * chunk (when one is available).  If an mbuf large enough for the full
 * chunk cannot be obtained, fall back to a notification without the
 * chunk payload rather than dropping the event altogether.
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t chunk_len;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		/* event not enabled */
		return;
	}
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
	} else {
		chunk_len = 0;
	}
	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* give up, no mbufs at all */
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	memset(sre, 0, notif_len);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	if (notif_len > sizeof(struct sctp_remote_error)) {
		/* Room for the chunk: copy it in and extend the length. */
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		control->spec_flags = M_NOTIFICATION;
		/* not that we need this */
		control->tail_mbuf = m_notify;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		/* no memory for the read-queue entry */
		sctp_m_freem(m_notify);
	}
}
3614 
3615 
/*
 * Central dispatcher that maps internal notification codes onto the
 * per-event notification helpers, which in turn queue socket-level
 * notifications for the application.  'data' is interpreted per
 * notification type: a sctp_nets, a sctp_tmit_chunk, a
 * sctp_stream_queue_pending, a sockaddr, a stream-number list, a packed
 * uint32_t, or a key id, depending on 'notification'.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* The reader side was shut down; nobody to deliver to. */
		return;
	}
	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		if (stcb->asoc.assoc_up_sent == 0) {
			/* Only report SCTP_COMM_UP once per association. */
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			/* Tell the user the peer does not support AUTH. */
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* Message failed while still on the stream send queue. */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
		/* NB: constant name spelling ("DELVIERY") matches the header. */
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			val = *((uint32_t *)data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* Locally generated abort; before COOKIE means setup failed. */
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		/* Abort received from the peer. */
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		/* For stream resets, 'error' carries the entry count. */
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* For AUTH events, 'data' carries the key number. */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __func__, notification, notification);
		break;
	}			/* end switch */
}
3795 
/*
 * Report every outbound DATA item as failed and release it.  Walks the
 * sent queue, the (pending) send queue and each stream's output queue,
 * delivering SENT_DG_FAIL / UNSENT_DG_FAIL / SPECIAL_SP_FAIL ULP
 * notifications with 'error' and freeing the chunk resources.
 * 'holds_lock' is non-zero when the caller already owns the TCB send
 * lock; 'so_locked' is passed through to the notification/free paths.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	/* Nothing to report once the socket is gone or closed. */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* NR-acked chunks no longer count toward the stream. */
			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* notify may have consumed the data; re-check */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* notify may have consumed the data; re-check */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* notify may have consumed the data; re-check */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3907 
/*
 * Tell the ULP the association was aborted.  All queued outbound data
 * is first reported as failed, then either the remote-abort or the
 * local-abort notification is delivered depending on 'from_peer'.
 * 'abort' is the ABORT chunk (may be NULL) forwarded to the ULP.
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
    struct sctp_abort_chunk *abort, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if (stcb == NULL) {
		return;
	}
	/* On 1-to-1 style (TCP-like) endpoints, remember the abort. */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	/* No one left to tell if the socket has gone away. */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, error, 1, so_locked);
	if (from_peer) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
	} else {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
	}
}
3937 
/*
 * Abort in response to an incoming packet: send an ABORT back to the
 * peer (using the peer's vtag and VRF when a TCB exists) and, if a TCB
 * is present, notify the ULP, update the abort statistics and free the
 * association.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	vtag = 0;
	if (stcb != NULL) {
		/* Use the association's values rather than the defaults. */
		vtag = stcb->asoc.peer_vtag;
		vrf_id = stcb->asoc.vrf_id;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: take a ref, drop the TCB lock, grab
		 * the socket lock, then reacquire the TCB lock.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3984 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the in/out TSN tracking logs of an association.
 * Only compiled when SCTP_ASOCLOG_OF_TSNS is defined, and the body is
 * additionally gated on NOSIY_PRINTS below.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
	/*
	 * NOTE(review): "NOSIY_PRINTS" looks like a typo of
	 * "NOISY_PRINTS"; as spelled, this body can only ever be enabled
	 * by defining the misspelled macro.  Confirm intent before
	 * renaming.
	 */
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/*
	 * The log is a circular buffer: when wrapped, print from the
	 * current index to the end first (oldest entries), ...
	 */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	/* ... then from the start up to the current index (newest). */
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* Same circular-buffer walk for the outbound log. */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
4045 #endif
4046 
/*
 * Locally abort an existing association: send an ABORT to the peer,
 * update statistics, notify the ULP (unless the socket is gone) and
 * free the association.  With stcb == NULL there is nothing to abort;
 * the inp is freed instead if it is marked gone and has no asocs left.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: take a ref, drop the TCB lock, grab the
	 * socket lock, then reacquire the TCB lock.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4107 
/*
 * Handle an "out of the blue" packet (one matching no association; cf.
 * RFC 4960, Section 8.4).  Scans the chunks: certain chunk types get no
 * response (PACKET-DROPPED, ABORT, SHUTDOWN-COMPLETE), a SHUTDOWN-ACK
 * is answered with SHUTDOWN-COMPLETE, and anything else results in an
 * ABORT unless suppressed by the sctp_blackhole sysctl.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* Endpoint is going away with no asocs left; free it now. */
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *)&chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			/* remembered for the blackhole decision below */
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    mflowtype, mflowid, fibnum,
			    vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *)&chunk_buf);
	}
	/*
	 * sctp_blackhole: 0 = always respond, 1 = stay silent only for
	 * packets containing an INIT, >1 = stay silent for everything.
	 */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
		    mflowtype, mflowid, fibnum,
		    vrf_id, port);
	}
}
4173 
4174 /*
4175  * check the inbound datagram to make sure there is not an abort inside it,
4176  * if there is return 1, else return 0.
4177  */
4178 int
4179 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4180 {
4181 	struct sctp_chunkhdr *ch;
4182 	struct sctp_init_chunk *init_chk, chunk_buf;
4183 	int offset;
4184 	unsigned int chk_length;
4185 
4186 	offset = iphlen + sizeof(struct sctphdr);
4187 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4188 	    (uint8_t *)&chunk_buf);
4189 	while (ch != NULL) {
4190 		chk_length = ntohs(ch->chunk_length);
4191 		if (chk_length < sizeof(*ch)) {
4192 			/* packet is probably corrupt */
4193 			break;
4194 		}
4195 		/* we seem to be ok, is it an abort? */
4196 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4197 			/* yep, tell them */
4198 			return (1);
4199 		}
4200 		if (ch->chunk_type == SCTP_INITIATION) {
4201 			/* need to update the Vtag */
4202 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4203 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4204 			if (init_chk != NULL) {
4205 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4206 			}
4207 		}
4208 		/* Nope, move to the next chunk */
4209 		offset += SCTP_SIZE32(chk_length);
4210 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4211 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4212 	}
4213 	return (0);
4214 }
4215 
4216 /*
4217  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4218  * set (i.e. it's 0) so, create this function to compare link local scopes
4219  */
4220 #ifdef INET6
4221 uint32_t
4222 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4223 {
4224 	struct sockaddr_in6 a, b;
4225 
4226 	/* save copies */
4227 	a = *addr1;
4228 	b = *addr2;
4229 
4230 	if (a.sin6_scope_id == 0)
4231 		if (sa6_recoverscope(&a)) {
4232 			/* can't get scope, so can't match */
4233 			return (0);
4234 		}
4235 	if (b.sin6_scope_id == 0)
4236 		if (sa6_recoverscope(&b)) {
4237 			/* can't get scope, so can't match */
4238 			return (0);
4239 		}
4240 	if (a.sin6_scope_id != b.sin6_scope_id)
4241 		return (0);
4242 
4243 	return (1);
4244 }
4245 
/*
 * Returns a sockaddr_in6 with the embedded scope recovered and removed.
 * For a link-local address with sin6_scope_id == 0, a copy is placed in
 * 'store' and the scope recovered there; the returned pointer is then
 * 'store'.  Otherwise the original 'addr' is returned.
 *
 * NOTE(review): in the else branch in6_clearscope() modifies the
 * caller's 'addr' in place (not the copy) -- confirm callers expect
 * their sockaddr to be altered here.
 */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				*store = *addr;
				if (!sa6_recoverscope(store)) {
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/* else, return the original "to" addr */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}
4269 #endif
4270 
4271 /*
4272  * are the two addresses the same?  currently a "scopeless" check returns: 1
4273  * if same, 0 if not
4274  */
4275 int
4276 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4277 {
4278 
4279 	/* must be valid */
4280 	if (sa1 == NULL || sa2 == NULL)
4281 		return (0);
4282 
4283 	/* must be the same family */
4284 	if (sa1->sa_family != sa2->sa_family)
4285 		return (0);
4286 
4287 	switch (sa1->sa_family) {
4288 #ifdef INET6
4289 	case AF_INET6:
4290 		{
4291 			/* IPv6 addresses */
4292 			struct sockaddr_in6 *sin6_1, *sin6_2;
4293 
4294 			sin6_1 = (struct sockaddr_in6 *)sa1;
4295 			sin6_2 = (struct sockaddr_in6 *)sa2;
4296 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4297 			    sin6_2));
4298 		}
4299 #endif
4300 #ifdef INET
4301 	case AF_INET:
4302 		{
4303 			/* IPv4 addresses */
4304 			struct sockaddr_in *sin_1, *sin_2;
4305 
4306 			sin_1 = (struct sockaddr_in *)sa1;
4307 			sin_2 = (struct sockaddr_in *)sa2;
4308 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4309 		}
4310 #endif
4311 	default:
4312 		/* we don't do these... */
4313 		return (0);
4314 	}
4315 }
4316 
4317 void
4318 sctp_print_address(struct sockaddr *sa)
4319 {
4320 #ifdef INET6
4321 	char ip6buf[INET6_ADDRSTRLEN];
4322 #endif
4323 
4324 	switch (sa->sa_family) {
4325 #ifdef INET6
4326 	case AF_INET6:
4327 		{
4328 			struct sockaddr_in6 *sin6;
4329 
4330 			sin6 = (struct sockaddr_in6 *)sa;
4331 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4332 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4333 			    ntohs(sin6->sin6_port),
4334 			    sin6->sin6_scope_id);
4335 			break;
4336 		}
4337 #endif
4338 #ifdef INET
4339 	case AF_INET:
4340 		{
4341 			struct sockaddr_in *sin;
4342 			unsigned char *p;
4343 
4344 			sin = (struct sockaddr_in *)sa;
4345 			p = (unsigned char *)&sin->sin_addr;
4346 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4347 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4348 			break;
4349 		}
4350 #endif
4351 	default:
4352 		SCTP_PRINTF("?\n");
4353 		break;
4354 	}
4355 }
4356 
/*
 * Move all read-queue entries belonging to 'stcb' from the old inp to
 * the new one (e.g. on peeloff/accept), transferring the socket-buffer
 * accounting from the old socket's receive buffer to the new one.
 * 'waitflags' is handed to sblock() on the old receive buffer.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* Release the old socket's sb accounting for each mbuf. */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* Charge each mbuf to the new socket's receive buffer. */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4432 
/*
 * Wake up any reader sleeping on the endpoint's socket.  On platforms
 * that need the socket lock (__APPLE__ / SCTP_SO_LOCK_TESTING) and when
 * the caller does not already hold it, the usual ref/unlock/lock dance
 * is performed first; the wakeup is skipped if the socket went away
 * while the TCB lock was dropped.
 */
void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(inp);
		if (!so_locked) {
			/* Hold a ref across the lock reordering. */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			/* Socket may have gone away while unlocked. */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
4471 
/*
 * Append a queued-to-read control to the endpoint's read queue and
 * charge its mbuf chain to the socket buffer 'sb' so select/poll see
 * the data.  Zero-length mbufs are pruned from the chain first; if
 * everything prunes away the control is freed instead of queued.
 * 'end' marks the message as complete; 'inp_read_lock_held' tells us
 * whether the caller already owns the INP read lock.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Reader is gone: drop the data and the control. */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* Notifications don't count as received user messages. */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		/* Charge this mbuf to the socket buffer accounting. */
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* Kick any reader waiting on the socket. */
	if (inp && inp->sctp_socket) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
}
4569 
4570 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4571  *************ALTERNATE ROUTING CODE
4572  */
4573 
4574 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4575  *************ALTERNATE ROUTING CODE
4576  */
4577 
4578 struct mbuf *
4579 sctp_generate_cause(uint16_t code, char *info)
4580 {
4581 	struct mbuf *m;
4582 	struct sctp_gen_error_cause *cause;
4583 	size_t info_len;
4584 	uint16_t len;
4585 
4586 	if ((code == 0) || (info == NULL)) {
4587 		return (NULL);
4588 	}
4589 	info_len = strlen(info);
4590 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4591 		return (NULL);
4592 	}
4593 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4594 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4595 	if (m != NULL) {
4596 		SCTP_BUF_LEN(m) = len;
4597 		cause = mtod(m, struct sctp_gen_error_cause *);
4598 		cause->code = htons(code);
4599 		cause->length = htons(len);
4600 		memcpy(cause->info, info, info_len);
4601 	}
4602 	return (m);
4603 }
4604 
4605 struct mbuf *
4606 sctp_generate_no_user_data_cause(uint32_t tsn)
4607 {
4608 	struct mbuf *m;
4609 	struct sctp_error_no_user_data *no_user_data_cause;
4610 	uint16_t len;
4611 
4612 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4613 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4614 	if (m != NULL) {
4615 		SCTP_BUF_LEN(m) = len;
4616 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4617 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4618 		no_user_data_cause->cause.length = htons(len);
4619 		no_user_data_cause->tsn = htonl(tsn);
4620 	}
4621 	return (m);
4622 }
4623 
4624 #ifdef SCTP_MBCNT_LOGGING
/*
 * SCTP_MBCNT_LOGGING variant: release the output-queue buffer space
 * accounted to chunk 'tp1' ('chk_cnt' chunks worth), logging the
 * decrease when mbcnt logging is enabled.  Both the association's
 * total_output_queue_size and (for 1-to-1 style sockets) the socket's
 * send-buffer count are decremented, saturating at zero rather than
 * underflowing.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* Saturate at zero to avoid unsigned underflow. */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* For TCP-style sockets also give the space back to so_snd. */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4656 
4657 #endif
4658 
4659 int
4660 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4661     uint8_t sent, int so_locked
4662 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4663     SCTP_UNUSED
4664 #endif
4665 )
4666 {
4667 	struct sctp_stream_out *strq;
4668 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4669 	struct sctp_stream_queue_pending *sp;
4670 	uint32_t mid;
4671 	uint16_t sid;
4672 	uint8_t foundeom = 0;
4673 	int ret_sz = 0;
4674 	int notdone;
4675 	int do_wakeup_routine = 0;
4676 
4677 	sid = tp1->rec.data.sid;
4678 	mid = tp1->rec.data.mid;
4679 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4680 		stcb->asoc.abandoned_sent[0]++;
4681 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4682 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
4683 #if defined(SCTP_DETAILED_STR_STATS)
4684 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4685 #endif
4686 	} else {
4687 		stcb->asoc.abandoned_unsent[0]++;
4688 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4689 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4690 #if defined(SCTP_DETAILED_STR_STATS)
4691 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4692 #endif
4693 	}
4694 	do {
4695 		ret_sz += tp1->book_size;
4696 		if (tp1->data != NULL) {
4697 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4698 				sctp_flight_size_decrease(tp1);
4699 				sctp_total_flight_decrease(stcb, tp1);
4700 			}
4701 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4702 			stcb->asoc.peers_rwnd += tp1->send_size;
4703 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4704 			if (sent) {
4705 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4706 			} else {
4707 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4708 			}
4709 			if (tp1->data) {
4710 				sctp_m_freem(tp1->data);
4711 				tp1->data = NULL;
4712 			}
4713 			do_wakeup_routine = 1;
4714 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4715 				stcb->asoc.sent_queue_cnt_removeable--;
4716 			}
4717 		}
4718 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4719 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4720 		    SCTP_DATA_NOT_FRAG) {
4721 			/* not frag'ed we ae done   */
4722 			notdone = 0;
4723 			foundeom = 1;
4724 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4725 			/* end of frag, we are done */
4726 			notdone = 0;
4727 			foundeom = 1;
4728 		} else {
4729 			/*
4730 			 * Its a begin or middle piece, we must mark all of
4731 			 * it
4732 			 */
4733 			notdone = 1;
4734 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4735 		}
4736 	} while (tp1 && notdone);
4737 	if (foundeom == 0) {
4738 		/*
4739 		 * The multi-part message was scattered across the send and
4740 		 * sent queue.
4741 		 */
4742 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4743 			if ((tp1->rec.data.sid != sid) ||
4744 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4745 				break;
4746 			}
4747 			/*
4748 			 * save to chk in case we have some on stream out
4749 			 * queue. If so and we have an un-transmitted one we
4750 			 * don't have to fudge the TSN.
4751 			 */
4752 			chk = tp1;
4753 			ret_sz += tp1->book_size;
4754 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4755 			if (sent) {
4756 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4757 			} else {
4758 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4759 			}
4760 			if (tp1->data) {
4761 				sctp_m_freem(tp1->data);
4762 				tp1->data = NULL;
4763 			}
4764 			/* No flight involved here book the size to 0 */
4765 			tp1->book_size = 0;
4766 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4767 				foundeom = 1;
4768 			}
4769 			do_wakeup_routine = 1;
4770 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4771 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4772 			/*
4773 			 * on to the sent queue so we can wait for it to be
4774 			 * passed by.
4775 			 */
4776 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4777 			    sctp_next);
4778 			stcb->asoc.send_queue_cnt--;
4779 			stcb->asoc.sent_queue_cnt++;
4780 		}
4781 	}
4782 	if (foundeom == 0) {
4783 		/*
4784 		 * Still no eom found. That means there is stuff left on the
4785 		 * stream out queue.. yuck.
4786 		 */
4787 		SCTP_TCB_SEND_LOCK(stcb);
4788 		strq = &stcb->asoc.strmout[sid];
4789 		sp = TAILQ_FIRST(&strq->outqueue);
4790 		if (sp != NULL) {
4791 			sp->discard_rest = 1;
4792 			/*
4793 			 * We may need to put a chunk on the queue that
4794 			 * holds the TSN that would have been sent with the
4795 			 * LAST bit.
4796 			 */
4797 			if (chk == NULL) {
4798 				/* Yep, we have to */
4799 				sctp_alloc_a_chunk(stcb, chk);
4800 				if (chk == NULL) {
4801 					/*
4802 					 * we are hosed. All we can do is
4803 					 * nothing.. which will cause an
4804 					 * abort if the peer is paying
4805 					 * attention.
4806 					 */
4807 					goto oh_well;
4808 				}
4809 				memset(chk, 0, sizeof(*chk));
4810 				chk->rec.data.rcv_flags = 0;
4811 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4812 				chk->asoc = &stcb->asoc;
4813 				if (stcb->asoc.idata_supported == 0) {
4814 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4815 						chk->rec.data.mid = 0;
4816 					} else {
4817 						chk->rec.data.mid = strq->next_mid_ordered;
4818 					}
4819 				} else {
4820 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4821 						chk->rec.data.mid = strq->next_mid_unordered;
4822 					} else {
4823 						chk->rec.data.mid = strq->next_mid_ordered;
4824 					}
4825 				}
4826 				chk->rec.data.sid = sp->sid;
4827 				chk->rec.data.ppid = sp->ppid;
4828 				chk->rec.data.context = sp->context;
4829 				chk->flags = sp->act_flags;
4830 				chk->whoTo = NULL;
4831 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4832 				strq->chunks_on_queues++;
4833 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4834 				stcb->asoc.sent_queue_cnt++;
4835 				stcb->asoc.pr_sctp_cnt++;
4836 			}
4837 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4838 			if (sp->sinfo_flags & SCTP_UNORDERED) {
4839 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4840 			}
4841 			if (stcb->asoc.idata_supported == 0) {
4842 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4843 					strq->next_mid_ordered++;
4844 				}
4845 			} else {
4846 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4847 					strq->next_mid_unordered++;
4848 				} else {
4849 					strq->next_mid_ordered++;
4850 				}
4851 			}
4852 	oh_well:
4853 			if (sp->data) {
4854 				/*
4855 				 * Pull any data to free up the SB and allow
4856 				 * sender to "add more" while we will throw
4857 				 * away :-)
4858 				 */
4859 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4860 				ret_sz += sp->length;
4861 				do_wakeup_routine = 1;
4862 				sp->some_taken = 1;
4863 				sctp_m_freem(sp->data);
4864 				sp->data = NULL;
4865 				sp->tail_mbuf = NULL;
4866 				sp->length = 0;
4867 			}
4868 		}
4869 		SCTP_TCB_SEND_UNLOCK(stcb);
4870 	}
4871 	if (do_wakeup_routine) {
4872 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4873 		struct socket *so;
4874 
4875 		so = SCTP_INP_SO(stcb->sctp_ep);
4876 		if (!so_locked) {
4877 			atomic_add_int(&stcb->asoc.refcnt, 1);
4878 			SCTP_TCB_UNLOCK(stcb);
4879 			SCTP_SOCKET_LOCK(so, 1);
4880 			SCTP_TCB_LOCK(stcb);
4881 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4882 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4883 				/* assoc was freed while we were unlocked */
4884 				SCTP_SOCKET_UNLOCK(so, 1);
4885 				return (ret_sz);
4886 			}
4887 		}
4888 #endif
4889 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4890 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4891 		if (!so_locked) {
4892 			SCTP_SOCKET_UNLOCK(so, 1);
4893 		}
4894 #endif
4895 	}
4896 	return (ret_sz);
4897 }
4898 
4899 /*
4900  * checks to see if the given address, sa, is one that is currently known by
4901  * the kernel note: can't distinguish the same address on multiple interfaces
4902  * and doesn't handle multiple addresses with different zone/scope id's note:
4903  * ifa_ifwithaddr() compares the entire sockaddr struct
4904  */
4905 struct sctp_ifa *
4906 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4907     int holds_lock)
4908 {
4909 	struct sctp_laddr *laddr;
4910 
4911 	if (holds_lock == 0) {
4912 		SCTP_INP_RLOCK(inp);
4913 	}
4914 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4915 		if (laddr->ifa == NULL)
4916 			continue;
4917 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4918 			continue;
4919 #ifdef INET
4920 		if (addr->sa_family == AF_INET) {
4921 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4922 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4923 				/* found him. */
4924 				if (holds_lock == 0) {
4925 					SCTP_INP_RUNLOCK(inp);
4926 				}
4927 				return (laddr->ifa);
4928 				break;
4929 			}
4930 		}
4931 #endif
4932 #ifdef INET6
4933 		if (addr->sa_family == AF_INET6) {
4934 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4935 			    &laddr->ifa->address.sin6)) {
4936 				/* found him. */
4937 				if (holds_lock == 0) {
4938 					SCTP_INP_RUNLOCK(inp);
4939 				}
4940 				return (laddr->ifa);
4941 				break;
4942 			}
4943 		}
4944 #endif
4945 	}
4946 	if (holds_lock == 0) {
4947 		SCTP_INP_RUNLOCK(inp);
4948 	}
4949 	return (NULL);
4950 }
4951 
/*
 * Compute the hash value used to pick an address bucket for a sockaddr.
 * The address bits are folded down to 32 bits and the upper half is
 * mixed into the lower half.  Unsupported address families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;
			uint32_t v4;

			sin = (struct sockaddr_in *)addr;
			v4 = sin->sin_addr.s_addr;
			/* Fold the top 16 bits into the bottom 16. */
			return (v4 ^ (v4 >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t sum;

			sin6 = (struct sockaddr_in6 *)addr;
			/* Sum the four 32-bit words of the v6 address. */
			sum = sin6->sin6_addr.s6_addr32[0];
			sum += sin6->sin6_addr.s6_addr32[1];
			sum += sin6->sin6_addr.s6_addr32[2];
			sum += sin6->sin6_addr.s6_addr32[3];
			/* Fold the top 16 bits into the bottom 16. */
			return (sum ^ (sum >> 16));
		}
#endif
	default:
		break;
	}
	return (0);
}
4985 
4986 struct sctp_ifa *
4987 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4988 {
4989 	struct sctp_ifa *sctp_ifap;
4990 	struct sctp_vrf *vrf;
4991 	struct sctp_ifalist *hash_head;
4992 	uint32_t hash_of_addr;
4993 
4994 	if (holds_lock == 0)
4995 		SCTP_IPI_ADDR_RLOCK();
4996 
4997 	vrf = sctp_find_vrf(vrf_id);
4998 	if (vrf == NULL) {
4999 		if (holds_lock == 0)
5000 			SCTP_IPI_ADDR_RUNLOCK();
5001 		return (NULL);
5002 	}
5003 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5004 
5005 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5006 	if (hash_head == NULL) {
5007 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5008 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5009 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5010 		sctp_print_address(addr);
5011 		SCTP_PRINTF("No such bucket for address\n");
5012 		if (holds_lock == 0)
5013 			SCTP_IPI_ADDR_RUNLOCK();
5014 
5015 		return (NULL);
5016 	}
5017 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5018 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5019 			continue;
5020 #ifdef INET
5021 		if (addr->sa_family == AF_INET) {
5022 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5023 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5024 				/* found him. */
5025 				if (holds_lock == 0)
5026 					SCTP_IPI_ADDR_RUNLOCK();
5027 				return (sctp_ifap);
5028 				break;
5029 			}
5030 		}
5031 #endif
5032 #ifdef INET6
5033 		if (addr->sa_family == AF_INET6) {
5034 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5035 			    &sctp_ifap->address.sin6)) {
5036 				/* found him. */
5037 				if (holds_lock == 0)
5038 					SCTP_IPI_ADDR_RUNLOCK();
5039 				return (sctp_ifap);
5040 				break;
5041 			}
5042 		}
5043 #endif
5044 	}
5045 	if (holds_lock == 0)
5046 		SCTP_IPI_ADDR_RUNLOCK();
5047 	return (NULL);
5048 }
5049 
/*
 * Called after the user has pulled data off the socket to decide whether
 * a window-update SACK should be sent.  freed_so_far is the caller's
 * running count of bytes freed since the last update; it is folded into
 * the tcb and reset to 0 here.  hold_rlock is non-zero when the caller
 * holds the INP read lock; rwnd_req is the threshold (in bytes) the rwnd
 * must have grown by before we bother sending an update.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;	/* set if we dropped the INP read lock below */
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the association while we work on it. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Hold a ref on the endpoint too, and bail if it is going away. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed-byte count into the tcb and reset it. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		/* rwnd shrank; nothing new to report. */
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * The window grew enough to advertise.  Drop the INP read
		 * lock (if held) before taking the TCB lock, to respect
		 * lock ordering; remember to retake it at "out:".
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check now that we own the TCB lock. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		/* Send a window-update SACK and flush any queued output. */
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Reacquire the INP read lock if we dropped it for the caller. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Drop the association reference taken at entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5132 
5133 int
5134 sctp_sorecvmsg(struct socket *so,
5135     struct uio *uio,
5136     struct mbuf **mp,
5137     struct sockaddr *from,
5138     int fromlen,
5139     int *msg_flags,
5140     struct sctp_sndrcvinfo *sinfo,
5141     int filling_sinfo)
5142 {
5143 	/*
5144 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5145 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5146 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5147 	 * On the way out we may send out any combination of:
5148 	 * MSG_NOTIFICATION MSG_EOR
5149 	 *
5150 	 */
5151 	struct sctp_inpcb *inp = NULL;
5152 	int my_len = 0;
5153 	int cp_len = 0, error = 0;
5154 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5155 	struct mbuf *m = NULL;
5156 	struct sctp_tcb *stcb = NULL;
5157 	int wakeup_read_socket = 0;
5158 	int freecnt_applied = 0;
5159 	int out_flags = 0, in_flags = 0;
5160 	int block_allowed = 1;
5161 	uint32_t freed_so_far = 0;
5162 	uint32_t copied_so_far = 0;
5163 	int in_eeor_mode = 0;
5164 	int no_rcv_needed = 0;
5165 	uint32_t rwnd_req = 0;
5166 	int hold_sblock = 0;
5167 	int hold_rlock = 0;
5168 	ssize_t slen = 0;
5169 	uint32_t held_length = 0;
5170 	int sockbuf_lock = 0;
5171 
5172 	if (uio == NULL) {
5173 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5174 		return (EINVAL);
5175 	}
5176 	if (msg_flags) {
5177 		in_flags = *msg_flags;
5178 		if (in_flags & MSG_PEEK)
5179 			SCTP_STAT_INCR(sctps_read_peeks);
5180 	} else {
5181 		in_flags = 0;
5182 	}
5183 	slen = uio->uio_resid;
5184 
5185 	/* Pull in and set up our int flags */
5186 	if (in_flags & MSG_OOB) {
5187 		/* Out of band's NOT supported */
5188 		return (EOPNOTSUPP);
5189 	}
5190 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5191 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5192 		return (EINVAL);
5193 	}
5194 	if ((in_flags & (MSG_DONTWAIT
5195 	    | MSG_NBIO
5196 	    )) ||
5197 	    SCTP_SO_IS_NBIO(so)) {
5198 		block_allowed = 0;
5199 	}
5200 	/* setup the endpoint */
5201 	inp = (struct sctp_inpcb *)so->so_pcb;
5202 	if (inp == NULL) {
5203 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5204 		return (EFAULT);
5205 	}
5206 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5207 	/* Must be at least a MTU's worth */
5208 	if (rwnd_req < SCTP_MIN_RWND)
5209 		rwnd_req = SCTP_MIN_RWND;
5210 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5211 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5212 		sctp_misc_ints(SCTP_SORECV_ENTER,
5213 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5214 	}
5215 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5216 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5217 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5218 	}
5219 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5220 	if (error) {
5221 		goto release_unlocked;
5222 	}
5223 	sockbuf_lock = 1;
5224 restart:
5225 
5226 
5227 restart_nosblocks:
5228 	if (hold_sblock == 0) {
5229 		SOCKBUF_LOCK(&so->so_rcv);
5230 		hold_sblock = 1;
5231 	}
5232 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5233 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5234 		goto out;
5235 	}
5236 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5237 		if (so->so_error) {
5238 			error = so->so_error;
5239 			if ((in_flags & MSG_PEEK) == 0)
5240 				so->so_error = 0;
5241 			goto out;
5242 		} else {
5243 			if (so->so_rcv.sb_cc == 0) {
5244 				/* indicate EOF */
5245 				error = 0;
5246 				goto out;
5247 			}
5248 		}
5249 	}
5250 	if (so->so_rcv.sb_cc <= held_length) {
5251 		if (so->so_error) {
5252 			error = so->so_error;
5253 			if ((in_flags & MSG_PEEK) == 0) {
5254 				so->so_error = 0;
5255 			}
5256 			goto out;
5257 		}
5258 		if ((so->so_rcv.sb_cc == 0) &&
5259 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5260 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5261 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5262 				/*
5263 				 * For active open side clear flags for
5264 				 * re-use passive open is blocked by
5265 				 * connect.
5266 				 */
5267 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5268 					/*
5269 					 * You were aborted, passive side
5270 					 * always hits here
5271 					 */
5272 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5273 					error = ECONNRESET;
5274 				}
5275 				so->so_state &= ~(SS_ISCONNECTING |
5276 				    SS_ISDISCONNECTING |
5277 				    SS_ISCONFIRMING |
5278 				    SS_ISCONNECTED);
5279 				if (error == 0) {
5280 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5281 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5282 						error = ENOTCONN;
5283 					}
5284 				}
5285 				goto out;
5286 			}
5287 		}
5288 		if (block_allowed) {
5289 			error = sbwait(&so->so_rcv);
5290 			if (error) {
5291 				goto out;
5292 			}
5293 			held_length = 0;
5294 			goto restart_nosblocks;
5295 		} else {
5296 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5297 			error = EWOULDBLOCK;
5298 			goto out;
5299 		}
5300 	}
5301 	if (hold_sblock == 1) {
5302 		SOCKBUF_UNLOCK(&so->so_rcv);
5303 		hold_sblock = 0;
5304 	}
5305 	/* we possibly have data we can read */
5306 	/* sa_ignore FREED_MEMORY */
5307 	control = TAILQ_FIRST(&inp->read_queue);
5308 	if (control == NULL) {
5309 		/*
5310 		 * This could be happening since the appender did the
5311 		 * increment but as not yet did the tailq insert onto the
5312 		 * read_queue
5313 		 */
5314 		if (hold_rlock == 0) {
5315 			SCTP_INP_READ_LOCK(inp);
5316 		}
5317 		control = TAILQ_FIRST(&inp->read_queue);
5318 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5319 #ifdef INVARIANTS
5320 			panic("Huh, its non zero and nothing on control?");
5321 #endif
5322 			so->so_rcv.sb_cc = 0;
5323 		}
5324 		SCTP_INP_READ_UNLOCK(inp);
5325 		hold_rlock = 0;
5326 		goto restart;
5327 	}
5328 	if ((control->length == 0) &&
5329 	    (control->do_not_ref_stcb)) {
5330 		/*
5331 		 * Clean up code for freeing assoc that left behind a
5332 		 * pdapi.. maybe a peer in EEOR that just closed after
5333 		 * sending and never indicated a EOR.
5334 		 */
5335 		if (hold_rlock == 0) {
5336 			hold_rlock = 1;
5337 			SCTP_INP_READ_LOCK(inp);
5338 		}
5339 		control->held_length = 0;
5340 		if (control->data) {
5341 			/* Hmm there is data here .. fix */
5342 			struct mbuf *m_tmp;
5343 			int cnt = 0;
5344 
5345 			m_tmp = control->data;
5346 			while (m_tmp) {
5347 				cnt += SCTP_BUF_LEN(m_tmp);
5348 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5349 					control->tail_mbuf = m_tmp;
5350 					control->end_added = 1;
5351 				}
5352 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5353 			}
5354 			control->length = cnt;
5355 		} else {
5356 			/* remove it */
5357 			TAILQ_REMOVE(&inp->read_queue, control, next);
5358 			/* Add back any hiddend data */
5359 			sctp_free_remote_addr(control->whoFrom);
5360 			sctp_free_a_readq(stcb, control);
5361 		}
5362 		if (hold_rlock) {
5363 			hold_rlock = 0;
5364 			SCTP_INP_READ_UNLOCK(inp);
5365 		}
5366 		goto restart;
5367 	}
5368 	if ((control->length == 0) &&
5369 	    (control->end_added == 1)) {
5370 		/*
5371 		 * Do we also need to check for (control->pdapi_aborted ==
5372 		 * 1)?
5373 		 */
5374 		if (hold_rlock == 0) {
5375 			hold_rlock = 1;
5376 			SCTP_INP_READ_LOCK(inp);
5377 		}
5378 		TAILQ_REMOVE(&inp->read_queue, control, next);
5379 		if (control->data) {
5380 #ifdef INVARIANTS
5381 			panic("control->data not null but control->length == 0");
5382 #else
5383 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5384 			sctp_m_freem(control->data);
5385 			control->data = NULL;
5386 #endif
5387 		}
5388 		if (control->aux_data) {
5389 			sctp_m_free(control->aux_data);
5390 			control->aux_data = NULL;
5391 		}
5392 #ifdef INVARIANTS
5393 		if (control->on_strm_q) {
5394 			panic("About to free ctl:%p so:%p and its in %d",
5395 			    control, so, control->on_strm_q);
5396 		}
5397 #endif
5398 		sctp_free_remote_addr(control->whoFrom);
5399 		sctp_free_a_readq(stcb, control);
5400 		if (hold_rlock) {
5401 			hold_rlock = 0;
5402 			SCTP_INP_READ_UNLOCK(inp);
5403 		}
5404 		goto restart;
5405 	}
5406 	if (control->length == 0) {
5407 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5408 		    (filling_sinfo)) {
5409 			/* find a more suitable one then this */
5410 			ctl = TAILQ_NEXT(control, next);
5411 			while (ctl) {
5412 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5413 				    (ctl->some_taken ||
5414 				    (ctl->spec_flags & M_NOTIFICATION) ||
5415 				    ((ctl->do_not_ref_stcb == 0) &&
5416 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5417 				    ) {
5418 					/*-
5419 					 * If we have a different TCB next, and there is data
5420 					 * present. If we have already taken some (pdapi), OR we can
5421 					 * ref the tcb and no delivery as started on this stream, we
5422 					 * take it. Note we allow a notification on a different
5423 					 * assoc to be delivered..
5424 					 */
5425 					control = ctl;
5426 					goto found_one;
5427 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5428 					    (ctl->length) &&
5429 					    ((ctl->some_taken) ||
5430 					    ((ctl->do_not_ref_stcb == 0) &&
5431 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5432 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5433 					/*-
5434 					 * If we have the same tcb, and there is data present, and we
5435 					 * have the strm interleave feature present. Then if we have
5436 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5437 					 * not started a delivery for this stream, we can take it.
5438 					 * Note we do NOT allow a notificaiton on the same assoc to
5439 					 * be delivered.
5440 					 */
5441 					control = ctl;
5442 					goto found_one;
5443 				}
5444 				ctl = TAILQ_NEXT(ctl, next);
5445 			}
5446 		}
5447 		/*
5448 		 * if we reach here, not suitable replacement is available
5449 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5450 		 * into the our held count, and its time to sleep again.
5451 		 */
5452 		held_length = so->so_rcv.sb_cc;
5453 		control->held_length = so->so_rcv.sb_cc;
5454 		goto restart;
5455 	}
5456 	/* Clear the held length since there is something to read */
5457 	control->held_length = 0;
5458 found_one:
5459 	/*
5460 	 * If we reach here, control has a some data for us to read off.
5461 	 * Note that stcb COULD be NULL.
5462 	 */
5463 	if (hold_rlock == 0) {
5464 		hold_rlock = 1;
5465 		SCTP_INP_READ_LOCK(inp);
5466 	}
5467 	control->some_taken++;
5468 	stcb = control->stcb;
5469 	if (stcb) {
5470 		if ((control->do_not_ref_stcb == 0) &&
5471 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5472 			if (freecnt_applied == 0)
5473 				stcb = NULL;
5474 		} else if (control->do_not_ref_stcb == 0) {
5475 			/* you can't free it on me please */
5476 			/*
5477 			 * The lock on the socket buffer protects us so the
5478 			 * free code will stop. But since we used the
5479 			 * socketbuf lock and the sender uses the tcb_lock
5480 			 * to increment, we need to use the atomic add to
5481 			 * the refcnt
5482 			 */
5483 			if (freecnt_applied) {
5484 #ifdef INVARIANTS
5485 				panic("refcnt already incremented");
5486 #else
5487 				SCTP_PRINTF("refcnt already incremented?\n");
5488 #endif
5489 			} else {
5490 				atomic_add_int(&stcb->asoc.refcnt, 1);
5491 				freecnt_applied = 1;
5492 			}
5493 			/*
5494 			 * Setup to remember how much we have not yet told
5495 			 * the peer our rwnd has opened up. Note we grab the
5496 			 * value from the tcb from last time. Note too that
5497 			 * sack sending clears this when a sack is sent,
5498 			 * which is fine. Once we hit the rwnd_req, we then
5499 			 * will go to the sctp_user_rcvd() that will not
5500 			 * lock until it KNOWs it MUST send a WUP-SACK.
5501 			 */
5502 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5503 			stcb->freed_by_sorcv_sincelast = 0;
5504 		}
5505 	}
5506 	if (stcb &&
5507 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5508 	    control->do_not_ref_stcb == 0) {
5509 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5510 	}
5511 	/* First lets get off the sinfo and sockaddr info */
5512 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5513 		sinfo->sinfo_stream = control->sinfo_stream;
5514 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5515 		sinfo->sinfo_flags = control->sinfo_flags;
5516 		sinfo->sinfo_ppid = control->sinfo_ppid;
5517 		sinfo->sinfo_context = control->sinfo_context;
5518 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5519 		sinfo->sinfo_tsn = control->sinfo_tsn;
5520 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5521 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5522 		nxt = TAILQ_NEXT(control, next);
5523 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5524 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5525 			struct sctp_extrcvinfo *s_extra;
5526 
5527 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5528 			if ((nxt) &&
5529 			    (nxt->length)) {
5530 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5531 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5532 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5533 				}
5534 				if (nxt->spec_flags & M_NOTIFICATION) {
5535 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5536 				}
5537 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5538 				s_extra->serinfo_next_length = nxt->length;
5539 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5540 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5541 				if (nxt->tail_mbuf != NULL) {
5542 					if (nxt->end_added) {
5543 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5544 					}
5545 				}
5546 			} else {
5547 				/*
5548 				 * we explicitly 0 this, since the memcpy
5549 				 * got some other things beyond the older
5550 				 * sinfo_ that is on the control's structure
5551 				 * :-D
5552 				 */
5553 				nxt = NULL;
5554 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5555 				s_extra->serinfo_next_aid = 0;
5556 				s_extra->serinfo_next_length = 0;
5557 				s_extra->serinfo_next_ppid = 0;
5558 				s_extra->serinfo_next_stream = 0;
5559 			}
5560 		}
5561 		/*
5562 		 * update off the real current cum-ack, if we have an stcb.
5563 		 */
5564 		if ((control->do_not_ref_stcb == 0) && stcb)
5565 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5566 		/*
5567 		 * mask off the high bits, we keep the actual chunk bits in
5568 		 * there.
5569 		 */
5570 		sinfo->sinfo_flags &= 0x00ff;
5571 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5572 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5573 		}
5574 	}
5575 #ifdef SCTP_ASOCLOG_OF_TSNS
5576 	{
5577 		int index, newindex;
5578 		struct sctp_pcbtsn_rlog *entry;
5579 
5580 		do {
5581 			index = inp->readlog_index;
5582 			newindex = index + 1;
5583 			if (newindex >= SCTP_READ_LOG_SIZE) {
5584 				newindex = 0;
5585 			}
5586 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5587 		entry = &inp->readlog[index];
5588 		entry->vtag = control->sinfo_assoc_id;
5589 		entry->strm = control->sinfo_stream;
5590 		entry->seq = (uint16_t)control->mid;
5591 		entry->sz = control->length;
5592 		entry->flgs = control->sinfo_flags;
5593 	}
5594 #endif
5595 	if ((fromlen > 0) && (from != NULL)) {
5596 		union sctp_sockstore store;
5597 		size_t len;
5598 
5599 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5600 #ifdef INET6
5601 		case AF_INET6:
5602 			len = sizeof(struct sockaddr_in6);
5603 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5604 			store.sin6.sin6_port = control->port_from;
5605 			break;
5606 #endif
5607 #ifdef INET
5608 		case AF_INET:
5609 #ifdef INET6
5610 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5611 				len = sizeof(struct sockaddr_in6);
5612 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5613 				    &store.sin6);
5614 				store.sin6.sin6_port = control->port_from;
5615 			} else {
5616 				len = sizeof(struct sockaddr_in);
5617 				store.sin = control->whoFrom->ro._l_addr.sin;
5618 				store.sin.sin_port = control->port_from;
5619 			}
5620 #else
5621 			len = sizeof(struct sockaddr_in);
5622 			store.sin = control->whoFrom->ro._l_addr.sin;
5623 			store.sin.sin_port = control->port_from;
5624 #endif
5625 			break;
5626 #endif
5627 		default:
5628 			len = 0;
5629 			break;
5630 		}
5631 		memcpy(from, &store, min((size_t)fromlen, len));
5632 #ifdef INET6
5633 		{
5634 			struct sockaddr_in6 lsa6, *from6;
5635 
5636 			from6 = (struct sockaddr_in6 *)from;
5637 			sctp_recover_scope_mac(from6, (&lsa6));
5638 		}
5639 #endif
5640 	}
5641 	if (hold_rlock) {
5642 		SCTP_INP_READ_UNLOCK(inp);
5643 		hold_rlock = 0;
5644 	}
5645 	if (hold_sblock) {
5646 		SOCKBUF_UNLOCK(&so->so_rcv);
5647 		hold_sblock = 0;
5648 	}
5649 	/* now copy out what data we can */
5650 	if (mp == NULL) {
5651 		/* copy out each mbuf in the chain up to length */
5652 get_more_data:
5653 		m = control->data;
5654 		while (m) {
5655 			/* Move out all we can */
5656 			cp_len = (int)uio->uio_resid;
5657 			my_len = (int)SCTP_BUF_LEN(m);
5658 			if (cp_len > my_len) {
5659 				/* not enough in this buf */
5660 				cp_len = my_len;
5661 			}
5662 			if (hold_rlock) {
5663 				SCTP_INP_READ_UNLOCK(inp);
5664 				hold_rlock = 0;
5665 			}
5666 			if (cp_len > 0)
5667 				error = uiomove(mtod(m, char *), cp_len, uio);
5668 			/* re-read */
5669 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5670 				goto release;
5671 			}
5672 			if ((control->do_not_ref_stcb == 0) && stcb &&
5673 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5674 				no_rcv_needed = 1;
5675 			}
5676 			if (error) {
5677 				/* error we are out of here */
5678 				goto release;
5679 			}
5680 			SCTP_INP_READ_LOCK(inp);
5681 			hold_rlock = 1;
5682 			if (cp_len == SCTP_BUF_LEN(m)) {
5683 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5684 				    (control->end_added)) {
5685 					out_flags |= MSG_EOR;
5686 					if ((control->do_not_ref_stcb == 0) &&
5687 					    (control->stcb != NULL) &&
5688 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5689 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5690 				}
5691 				if (control->spec_flags & M_NOTIFICATION) {
5692 					out_flags |= MSG_NOTIFICATION;
5693 				}
5694 				/* we ate up the mbuf */
5695 				if (in_flags & MSG_PEEK) {
5696 					/* just looking */
5697 					m = SCTP_BUF_NEXT(m);
5698 					copied_so_far += cp_len;
5699 				} else {
5700 					/* dispose of the mbuf */
5701 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5702 						sctp_sblog(&so->so_rcv,
5703 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5704 					}
5705 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5706 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5707 						sctp_sblog(&so->so_rcv,
5708 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5709 					}
5710 					copied_so_far += cp_len;
5711 					freed_so_far += cp_len;
5712 					freed_so_far += MSIZE;
5713 					atomic_subtract_int(&control->length, cp_len);
5714 					control->data = sctp_m_free(m);
5715 					m = control->data;
5716 					/*
5717 					 * been through it all, must hold sb
5718 					 * lock ok to null tail
5719 					 */
5720 					if (control->data == NULL) {
5721 #ifdef INVARIANTS
5722 						if ((control->end_added == 0) ||
5723 						    (TAILQ_NEXT(control, next) == NULL)) {
5724 							/*
5725 							 * If the end is not
5726 							 * added, OR the
5727 							 * next is NOT null
5728 							 * we MUST have the
5729 							 * lock.
5730 							 */
5731 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5732 								panic("Hmm we don't own the lock?");
5733 							}
5734 						}
5735 #endif
5736 						control->tail_mbuf = NULL;
5737 #ifdef INVARIANTS
5738 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5739 							panic("end_added, nothing left and no MSG_EOR");
5740 						}
5741 #endif
5742 					}
5743 				}
5744 			} else {
5745 				/* Do we need to trim the mbuf? */
5746 				if (control->spec_flags & M_NOTIFICATION) {
5747 					out_flags |= MSG_NOTIFICATION;
5748 				}
5749 				if ((in_flags & MSG_PEEK) == 0) {
5750 					SCTP_BUF_RESV_UF(m, cp_len);
5751 					SCTP_BUF_LEN(m) -= cp_len;
5752 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5753 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5754 					}
5755 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5756 					if ((control->do_not_ref_stcb == 0) &&
5757 					    stcb) {
5758 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5759 					}
5760 					copied_so_far += cp_len;
5761 					freed_so_far += cp_len;
5762 					freed_so_far += MSIZE;
5763 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5764 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5765 						    SCTP_LOG_SBRESULT, 0);
5766 					}
5767 					atomic_subtract_int(&control->length, cp_len);
5768 				} else {
5769 					copied_so_far += cp_len;
5770 				}
5771 			}
5772 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5773 				break;
5774 			}
5775 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5776 			    (control->do_not_ref_stcb == 0) &&
5777 			    (freed_so_far >= rwnd_req)) {
5778 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5779 			}
5780 		}		/* end while(m) */
5781 		/*
5782 		 * At this point we have looked at it all and we either have
5783 		 * a MSG_EOR/or read all the user wants... <OR>
5784 		 * control->length == 0.
5785 		 */
5786 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5787 			/* we are done with this control */
5788 			if (control->length == 0) {
5789 				if (control->data) {
5790 #ifdef INVARIANTS
5791 					panic("control->data not null at read eor?");
5792 #else
5793 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5794 					sctp_m_freem(control->data);
5795 					control->data = NULL;
5796 #endif
5797 				}
5798 		done_with_control:
5799 				if (hold_rlock == 0) {
5800 					SCTP_INP_READ_LOCK(inp);
5801 					hold_rlock = 1;
5802 				}
5803 				TAILQ_REMOVE(&inp->read_queue, control, next);
5804 				/* Add back any hiddend data */
5805 				if (control->held_length) {
5806 					held_length = 0;
5807 					control->held_length = 0;
5808 					wakeup_read_socket = 1;
5809 				}
5810 				if (control->aux_data) {
5811 					sctp_m_free(control->aux_data);
5812 					control->aux_data = NULL;
5813 				}
5814 				no_rcv_needed = control->do_not_ref_stcb;
5815 				sctp_free_remote_addr(control->whoFrom);
5816 				control->data = NULL;
5817 #ifdef INVARIANTS
5818 				if (control->on_strm_q) {
5819 					panic("About to free ctl:%p so:%p and its in %d",
5820 					    control, so, control->on_strm_q);
5821 				}
5822 #endif
5823 				sctp_free_a_readq(stcb, control);
5824 				control = NULL;
5825 				if ((freed_so_far >= rwnd_req) &&
5826 				    (no_rcv_needed == 0))
5827 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5828 
5829 			} else {
5830 				/*
5831 				 * The user did not read all of this
5832 				 * message, turn off the returned MSG_EOR
5833 				 * since we are leaving more behind on the
5834 				 * control to read.
5835 				 */
5836 #ifdef INVARIANTS
5837 				if (control->end_added &&
5838 				    (control->data == NULL) &&
5839 				    (control->tail_mbuf == NULL)) {
5840 					panic("Gak, control->length is corrupt?");
5841 				}
5842 #endif
5843 				no_rcv_needed = control->do_not_ref_stcb;
5844 				out_flags &= ~MSG_EOR;
5845 			}
5846 		}
5847 		if (out_flags & MSG_EOR) {
5848 			goto release;
5849 		}
5850 		if ((uio->uio_resid == 0) ||
5851 		    ((in_eeor_mode) &&
5852 		    (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
5853 			goto release;
5854 		}
5855 		/*
5856 		 * If I hit here the receiver wants more and this message is
5857 		 * NOT done (pd-api). So two questions. Can we block? if not
5858 		 * we are done. Did the user NOT set MSG_WAITALL?
5859 		 */
5860 		if (block_allowed == 0) {
5861 			goto release;
5862 		}
5863 		/*
5864 		 * We need to wait for more data a few things: - We don't
5865 		 * sbunlock() so we don't get someone else reading. - We
5866 		 * must be sure to account for the case where what is added
5867 		 * is NOT to our control when we wakeup.
5868 		 */
5869 
5870 		/*
5871 		 * Do we need to tell the transport a rwnd update might be
5872 		 * needed before we go to sleep?
5873 		 */
5874 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5875 		    ((freed_so_far >= rwnd_req) &&
5876 		    (control->do_not_ref_stcb == 0) &&
5877 		    (no_rcv_needed == 0))) {
5878 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5879 		}
5880 wait_some_more:
5881 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5882 			goto release;
5883 		}
5884 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5885 			goto release;
5886 
5887 		if (hold_rlock == 1) {
5888 			SCTP_INP_READ_UNLOCK(inp);
5889 			hold_rlock = 0;
5890 		}
5891 		if (hold_sblock == 0) {
5892 			SOCKBUF_LOCK(&so->so_rcv);
5893 			hold_sblock = 1;
5894 		}
5895 		if ((copied_so_far) && (control->length == 0) &&
5896 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5897 			goto release;
5898 		}
5899 		if (so->so_rcv.sb_cc <= control->held_length) {
5900 			error = sbwait(&so->so_rcv);
5901 			if (error) {
5902 				goto release;
5903 			}
5904 			control->held_length = 0;
5905 		}
5906 		if (hold_sblock) {
5907 			SOCKBUF_UNLOCK(&so->so_rcv);
5908 			hold_sblock = 0;
5909 		}
5910 		if (control->length == 0) {
5911 			/* still nothing here */
5912 			if (control->end_added == 1) {
5913 				/* he aborted, or is done i.e.did a shutdown */
5914 				out_flags |= MSG_EOR;
5915 				if (control->pdapi_aborted) {
5916 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5917 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5918 
5919 					out_flags |= MSG_TRUNC;
5920 				} else {
5921 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5922 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5923 				}
5924 				goto done_with_control;
5925 			}
5926 			if (so->so_rcv.sb_cc > held_length) {
5927 				control->held_length = so->so_rcv.sb_cc;
5928 				held_length = 0;
5929 			}
5930 			goto wait_some_more;
5931 		} else if (control->data == NULL) {
5932 			/*
5933 			 * we must re-sync since data is probably being
5934 			 * added
5935 			 */
5936 			SCTP_INP_READ_LOCK(inp);
5937 			if ((control->length > 0) && (control->data == NULL)) {
5938 				/*
5939 				 * big trouble.. we have the lock and its
5940 				 * corrupt?
5941 				 */
5942 #ifdef INVARIANTS
5943 				panic("Impossible data==NULL length !=0");
5944 #endif
5945 				out_flags |= MSG_EOR;
5946 				out_flags |= MSG_TRUNC;
5947 				control->length = 0;
5948 				SCTP_INP_READ_UNLOCK(inp);
5949 				goto done_with_control;
5950 			}
5951 			SCTP_INP_READ_UNLOCK(inp);
5952 			/* We will fall around to get more data */
5953 		}
5954 		goto get_more_data;
5955 	} else {
5956 		/*-
5957 		 * Give caller back the mbuf chain,
5958 		 * store in uio_resid the length
5959 		 */
5960 		wakeup_read_socket = 0;
5961 		if ((control->end_added == 0) ||
5962 		    (TAILQ_NEXT(control, next) == NULL)) {
5963 			/* Need to get rlock */
5964 			if (hold_rlock == 0) {
5965 				SCTP_INP_READ_LOCK(inp);
5966 				hold_rlock = 1;
5967 			}
5968 		}
5969 		if (control->end_added) {
5970 			out_flags |= MSG_EOR;
5971 			if ((control->do_not_ref_stcb == 0) &&
5972 			    (control->stcb != NULL) &&
5973 			    ((control->spec_flags & M_NOTIFICATION) == 0))
5974 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5975 		}
5976 		if (control->spec_flags & M_NOTIFICATION) {
5977 			out_flags |= MSG_NOTIFICATION;
5978 		}
5979 		uio->uio_resid = control->length;
5980 		*mp = control->data;
5981 		m = control->data;
5982 		while (m) {
5983 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5984 				sctp_sblog(&so->so_rcv,
5985 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5986 			}
5987 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5988 			freed_so_far += SCTP_BUF_LEN(m);
5989 			freed_so_far += MSIZE;
5990 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5991 				sctp_sblog(&so->so_rcv,
5992 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5993 			}
5994 			m = SCTP_BUF_NEXT(m);
5995 		}
5996 		control->data = control->tail_mbuf = NULL;
5997 		control->length = 0;
5998 		if (out_flags & MSG_EOR) {
5999 			/* Done with this control */
6000 			goto done_with_control;
6001 		}
6002 	}
6003 release:
6004 	if (hold_rlock == 1) {
6005 		SCTP_INP_READ_UNLOCK(inp);
6006 		hold_rlock = 0;
6007 	}
6008 	if (hold_sblock == 1) {
6009 		SOCKBUF_UNLOCK(&so->so_rcv);
6010 		hold_sblock = 0;
6011 	}
6012 	sbunlock(&so->so_rcv);
6013 	sockbuf_lock = 0;
6014 
6015 release_unlocked:
6016 	if (hold_sblock) {
6017 		SOCKBUF_UNLOCK(&so->so_rcv);
6018 		hold_sblock = 0;
6019 	}
6020 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6021 		if ((freed_so_far >= rwnd_req) &&
6022 		    (control && (control->do_not_ref_stcb == 0)) &&
6023 		    (no_rcv_needed == 0))
6024 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6025 	}
6026 out:
6027 	if (msg_flags) {
6028 		*msg_flags = out_flags;
6029 	}
6030 	if (((out_flags & MSG_EOR) == 0) &&
6031 	    ((in_flags & MSG_PEEK) == 0) &&
6032 	    (sinfo) &&
6033 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6034 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6035 		struct sctp_extrcvinfo *s_extra;
6036 
6037 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6038 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6039 	}
6040 	if (hold_rlock == 1) {
6041 		SCTP_INP_READ_UNLOCK(inp);
6042 	}
6043 	if (hold_sblock) {
6044 		SOCKBUF_UNLOCK(&so->so_rcv);
6045 	}
6046 	if (sockbuf_lock) {
6047 		sbunlock(&so->so_rcv);
6048 	}
6049 	if (freecnt_applied) {
6050 		/*
6051 		 * The lock on the socket buffer protects us so the free
6052 		 * code will stop. But since we used the socketbuf lock and
6053 		 * the sender uses the tcb_lock to increment, we need to use
6054 		 * the atomic add to the refcnt.
6055 		 */
6056 		if (stcb == NULL) {
6057 #ifdef INVARIANTS
6058 			panic("stcb for refcnt has gone NULL?");
6059 			goto stage_left;
6060 #else
6061 			goto stage_left;
6062 #endif
6063 		}
6064 		/* Save the value back for next time */
6065 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6066 		atomic_add_int(&stcb->asoc.refcnt, -1);
6067 	}
6068 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6069 		if (stcb) {
6070 			sctp_misc_ints(SCTP_SORECV_DONE,
6071 			    freed_so_far,
6072 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6073 			    stcb->asoc.my_rwnd,
6074 			    so->so_rcv.sb_cc);
6075 		} else {
6076 			sctp_misc_ints(SCTP_SORECV_DONE,
6077 			    freed_so_far,
6078 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6079 			    0,
6080 			    so->so_rcv.sb_cc);
6081 		}
6082 	}
6083 stage_left:
6084 	if (wakeup_read_socket) {
6085 		sctp_sorwakeup(inp, so);
6086 	}
6087 	return (error);
6088 }
6089 
6090 
6091 #ifdef SCTP_MBUF_LOGGING
6092 struct mbuf *
6093 sctp_m_free(struct mbuf *m)
6094 {
6095 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6096 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6097 	}
6098 	return (m_free(m));
6099 }
6100 
6101 void
6102 sctp_m_freem(struct mbuf *mb)
6103 {
6104 	while (mb != NULL)
6105 		mb = sctp_m_free(mb);
6106 }
6107 
6108 #endif
6109 
6110 int
6111 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6112 {
6113 	/*
6114 	 * Given a local address. For all associations that holds the
6115 	 * address, request a peer-set-primary.
6116 	 */
6117 	struct sctp_ifa *ifa;
6118 	struct sctp_laddr *wi;
6119 
6120 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6121 	if (ifa == NULL) {
6122 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6123 		return (EADDRNOTAVAIL);
6124 	}
6125 	/*
6126 	 * Now that we have the ifa we must awaken the iterator with this
6127 	 * message.
6128 	 */
6129 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6130 	if (wi == NULL) {
6131 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6132 		return (ENOMEM);
6133 	}
6134 	/* Now incr the count and int wi structure */
6135 	SCTP_INCR_LADDR_COUNT();
6136 	memset(wi, 0, sizeof(*wi));
6137 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6138 	wi->ifa = ifa;
6139 	wi->action = SCTP_SET_PRIM_ADDR;
6140 	atomic_add_int(&ifa->refcount, 1);
6141 
6142 	/* Now add it to the work queue */
6143 	SCTP_WQ_ADDR_LOCK();
6144 	/*
6145 	 * Should this really be a tailq? As it is we will process the
6146 	 * newest first :-0
6147 	 */
6148 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6149 	SCTP_WQ_ADDR_UNLOCK();
6150 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6151 	    (struct sctp_inpcb *)NULL,
6152 	    (struct sctp_tcb *)NULL,
6153 	    (struct sctp_nets *)NULL);
6154 	return (0);
6155 }
6156 
6157 
6158 int
6159 sctp_soreceive(struct socket *so,
6160     struct sockaddr **psa,
6161     struct uio *uio,
6162     struct mbuf **mp0,
6163     struct mbuf **controlp,
6164     int *flagsp)
6165 {
6166 	int error, fromlen;
6167 	uint8_t sockbuf[256];
6168 	struct sockaddr *from;
6169 	struct sctp_extrcvinfo sinfo;
6170 	int filling_sinfo = 1;
6171 	struct sctp_inpcb *inp;
6172 
6173 	inp = (struct sctp_inpcb *)so->so_pcb;
6174 	/* pickup the assoc we are reading from */
6175 	if (inp == NULL) {
6176 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6177 		return (EINVAL);
6178 	}
6179 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6180 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6181 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6182 	    (controlp == NULL)) {
6183 		/* user does not want the sndrcv ctl */
6184 		filling_sinfo = 0;
6185 	}
6186 	if (psa) {
6187 		from = (struct sockaddr *)sockbuf;
6188 		fromlen = sizeof(sockbuf);
6189 		from->sa_len = 0;
6190 	} else {
6191 		from = NULL;
6192 		fromlen = 0;
6193 	}
6194 
6195 	if (filling_sinfo) {
6196 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6197 	}
6198 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6199 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6200 	if (controlp != NULL) {
6201 		/* copy back the sinfo in a CMSG format */
6202 		if (filling_sinfo)
6203 			*controlp = sctp_build_ctl_nchunk(inp,
6204 			    (struct sctp_sndrcvinfo *)&sinfo);
6205 		else
6206 			*controlp = NULL;
6207 	}
6208 	if (psa) {
6209 		/* copy back the address info */
6210 		if (from && from->sa_len) {
6211 			*psa = sodupsockaddr(from, M_NOWAIT);
6212 		} else {
6213 			*psa = NULL;
6214 		}
6215 	}
6216 	return (error);
6217 }
6218 
6219 
6220 
6221 
6222 
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	/*
	 * Add a packed array of totaddr sockaddrs, starting at addr, as
	 * remote (peer) addresses of the association stcb.  Returns the
	 * number of addresses successfully added.  On a bad address or an
	 * allocation failure the association is torn down via
	 * sctp_free_assoc(), *error is set (EINVAL/ENOBUFS), and the
	 * caller must not reference stcb any further.
	 */
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject wildcard, broadcast and multicast peers. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast peers. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): an unknown family leaves incr at its
			 * previous value (0 on the first iteration), so sa is
			 * not advanced and the same bytes are re-examined on
			 * every remaining iteration.  Presumably callers have
			 * already validated the families via
			 * sctp_connectx_helper_find() — TODO confirm.
			 */
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6303 
struct sctp_tcb *
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    unsigned int *totaddr,
    unsigned int *num_v4, unsigned int *num_v6, int *error,
    unsigned int limit, int *bad_addr)
{
	/*
	 * Walk the packed sockaddr list handed to sctp_connectx(): count
	 * the IPv4/IPv6 addresses into *num_v4/*num_v6, validate each
	 * sa_len, and check whether any address already belongs to an
	 * association on inp.  On a malformed address *error is set to
	 * EINVAL, *bad_addr to 1 and NULL is returned.  If an existing
	 * association is found it is returned (the inp reference taken
	 * for the lookup is kept in that case); otherwise NULL.  *totaddr
	 * is trimmed to the number of addresses actually examined when
	 * the walk stops early.
	 */
	struct sockaddr *sa;
	struct sctp_tcb *stcb = NULL;
	unsigned int incr, at, i;

	at = 0;
	sa = addr;
	*error = *num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	for (i = 0; i < *totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = (unsigned int)sizeof(struct sockaddr_in);
			if (sa->sa_len != incr) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
			(*num_v4) += 1;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			{
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)sa;
				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
					/* Must be non-mapped for connectx */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				incr = (unsigned int)sizeof(struct sockaddr_in6);
				if (sa->sa_len != incr) {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				(*num_v6) += 1;
				break;
			}
#endif
		default:
			/* Unknown family: stop the walk at this entry. */
			*totaddr = i;
			incr = 0;
			/* we are done */
			break;
		}
		if (i == *totaddr) {
			break;
		}
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/* Already have or am bring up an association */
			return (stcb);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		/*
		 * NOTE(review): 'at' is never advanced, so this check
		 * degenerates to (incr > limit) and the cumulative buffer
		 * limit is never enforced across entries — it looks like an
		 * 'at += incr;' is missing.  Confirm against callers before
		 * relying on 'limit' to bound the walk.
		 */
		if ((at + incr) > limit) {
			*totaddr = i;
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return ((struct sctp_tcb *)NULL);
}
6381 
6382 /*
6383  * sctp_bindx(ADD) for one address.
6384  * assumes all arguments are valid/checked by caller.
6385  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/*
	 * Bind one additional local address to endpoint inp on socket so.
	 * On failure *error is set (EINVAL, EADDRINUSE, or whatever the
	 * bind/address-management call returns); on success it is left
	 * untouched.  p is the calling thread/proc, required only when
	 * this turns out to be the endpoint's very first bind.
	 */
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Bind the embedded plain IPv4 address instead. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* Endpoint is not bound at all yet: do a regular first bind. */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		/*
		 * NOTE(review): the port field sits at the same offset in
		 * sockaddr_in and sockaddr_in6 on BSD, so this cast is used
		 * for both families — confirm before changing the
		 * addr_touse handling.
		 */
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Address is free: add it to this endpoint. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* Another endpoint owns this address/port pair. */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6511 
6512 /*
6513  * sctp_bindx(DELETE) for one address.
6514  * assumes all arguments are valid/checked by caller.
6515  */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/*
	 * Remove one previously bound local address from endpoint inp.
	 * Mirrors sctp_bindx_add_address(): validate the address family
	 * and length against the socket type, convert a v4-mapped v6
	 * address to plain IPv4, then ask the address-management code to
	 * delete it.  On failure *error is set; on success it is left
	 * untouched.
	 */
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;
#endif

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Delete the embedded plain IPv4 address instead. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6598 
6599 /*
6600  * returns the valid local address count for an assoc, taking into account
6601  * all scoping rules
6602  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Return the number of local addresses that are valid for the
	 * association stcb, applying the association's scoping rules
	 * (loopback, private IPv4, link-local/site-local IPv6) and the
	 * endpoint's jail (prison) restrictions.  Holds the global
	 * address read lock for the duration of the walk.
	 */
	int loopback_scope;
#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;
#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;
#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* Skip addresses our jail may not use. */
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						/* Skip addresses our jail may not use. */
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6740 
6741 #if defined(SCTP_LOCAL_TRACE_BUF)
6742 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	/*
	 * Append one entry (subsystem tag plus six parameters) to the
	 * global circular trace buffer.
	 */
	uint32_t saveindex, newindex;

	/*
	 * Reserve a slot without a lock: atomically advance the shared
	 * index, wrapping from SCTP_MAX_LOGGING_SIZE back to 1, retrying
	 * if another CPU raced us.  The reserved slot is then filled in
	 * without further synchronization.
	 */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* Map the one-past-the-end index back to slot 0. */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6768 
6769 #endif
6770 static void
6771 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6772     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6773 {
6774 	struct ip *iph;
6775 #ifdef INET6
6776 	struct ip6_hdr *ip6;
6777 #endif
6778 	struct mbuf *sp, *last;
6779 	struct udphdr *uhdr;
6780 	uint16_t port;
6781 
6782 	if ((m->m_flags & M_PKTHDR) == 0) {
6783 		/* Can't handle one that is not a pkt hdr */
6784 		goto out;
6785 	}
6786 	/* Pull the src port */
6787 	iph = mtod(m, struct ip *);
6788 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6789 	port = uhdr->uh_sport;
6790 	/*
6791 	 * Split out the mbuf chain. Leave the IP header in m, place the
6792 	 * rest in the sp.
6793 	 */
6794 	sp = m_split(m, off, M_NOWAIT);
6795 	if (sp == NULL) {
6796 		/* Gak, drop packet, we can't do a split */
6797 		goto out;
6798 	}
6799 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6800 		/* Gak, packet can't have an SCTP header in it - too small */
6801 		m_freem(sp);
6802 		goto out;
6803 	}
6804 	/* Now pull up the UDP header and SCTP header together */
6805 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6806 	if (sp == NULL) {
6807 		/* Gak pullup failed */
6808 		goto out;
6809 	}
6810 	/* Trim out the UDP header */
6811 	m_adj(sp, sizeof(struct udphdr));
6812 
6813 	/* Now reconstruct the mbuf chain */
6814 	for (last = m; last->m_next; last = last->m_next);
6815 	last->m_next = sp;
6816 	m->m_pkthdr.len += sp->m_pkthdr.len;
6817 	/*
6818 	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
6819 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
6820 	 * CSUM_SCTP_VALID this would imply that the HW also verified the
6821 	 * SCTP checksum. Therefore, clear the bit.
6822 	 */
6823 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6824 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6825 	    m->m_pkthdr.len,
6826 	    if_name(m->m_pkthdr.rcvif),
6827 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6828 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
6829 	iph = mtod(m, struct ip *);
6830 	switch (iph->ip_v) {
6831 #ifdef INET
6832 	case IPVERSION:
6833 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6834 		sctp_input_with_port(m, off, port);
6835 		break;
6836 #endif
6837 #ifdef INET6
6838 	case IPV6_VERSION >> 4:
6839 		ip6 = mtod(m, struct ip6_hdr *);
6840 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6841 		sctp6_input_with_port(&m, &off, port);
6842 		break;
6843 #endif
6844 	default:
6845 		goto out;
6846 		break;
6847 	}
6848 	return;
6849 out:
6850 	m_freem(m);
6851 }
6852 
6853 #ifdef INET
/*
 * ICMP error handler for SCTP packets tunneled over UDP (IPv4 case).
 * Invoked by the UDP tunneling code when an ICMP error quoting one of
 * our encapsulated packets arrives. Locates the association, validates
 * that the quoted packet plausibly originated from it (ports and
 * verification tag), and forwards the error to sctp_notify().
 */
static void
sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
{
	struct ip *outer_ip, *inner_ip;
	struct sctphdr *sh;
	struct icmp *icmp;
	struct udphdr *udp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_init_chunk *ch;
	struct sockaddr_in src, dst;
	uint8_t type, code;

	/*
	 * 'vip' points at the quoted (inner) IP header inside the ICMP
	 * message. Step backwards from it to reach the ICMP header and
	 * then the outer IP header (struct icmp embeds a struct ip, hence
	 * the size adjustment).
	 */
	inner_ip = (struct ip *)vip;
	icmp = (struct icmp *)((caddr_t)inner_ip -
	    (sizeof(struct icmp) - sizeof(struct ip)));
	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
	/*
	 * Require that the quote covers at least the UDP header and the
	 * first 8 bytes of the SCTP common header (ports + verification
	 * tag). The first '8' accounts for the ICMP header itself.
	 */
	if (ntohs(outer_ip->ip_len) <
	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
		return;
	}
	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
	sh = (struct sctphdr *)(udp + 1);
	/* Build sockaddrs from the quoted packet's addresses/ports. */
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
	src.sin_len = sizeof(struct sockaddr_in);
	src.sin_port = sh->src_port;
	src.sin_addr = inner_ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(struct sockaddr_in);
	dst.sin_port = sh->dest_port;
	dst.sin_addr = inner_ip->ip_dst;
	/*
	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
	 * holds our local endpoint address. Thus we reverse the dst and the
	 * src in the lookup.
	 */
	inp = NULL;
	net = NULL;
	/* On success the stcb is returned locked; every path below must unlock it. */
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1,
	    SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp->uh_dport != net->port) ||
		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh->v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			/*
			 * A zero verification tag is only legitimate on an
			 * INIT chunk. The '8 + 20' covers the UDP header
			 * plus enough of the SCTP packet (common header,
			 * chunk header, initiate_tag) to validate it.
			 */
			if (ntohs(outer_ip->ip_len) >=
			    sizeof(struct ip) +
			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				ch = (struct sctp_init_chunk *)(sh + 1);
				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				/* Quote too short to validate; ignore. */
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = icmp->icmp_type;
		code = icmp->icmp_code;
		/*
		 * "Port unreachable" refers to the UDP encapsulation port;
		 * map it to "protocol unreachable", the closest analogue
		 * for plain SCTP.
		 */
		if ((type == ICMP_UNREACH) &&
		    (code == ICMP_UNREACH_PORT)) {
			code = ICMP_UNREACH_PROTOCOL;
		}
		/* sctp_notify() consumes the TCB lock. */
		sctp_notify(inp, stcb, net, type, code,
		    ntohs(inner_ip->ip_len),
		    (uint32_t)ntohs(icmp->icmp_nextmtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
	return;
}
6961 #endif
6962 
6963 #ifdef INET6
/*
 * ICMPv6 error handler for SCTP packets tunneled over UDP (IPv6 case).
 * Mirrors sctp_recv_icmp_tunneled_packet(), but the quoted packet lives
 * in an mbuf chain, so header fields are extracted with m_copydata()
 * rather than via direct pointers.
 */
static void
sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
{
	struct ip6ctlparam *ip6cp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctphdr sh;
	struct udphdr udp;
	struct sockaddr_in6 src, dst;
	uint8_t type, code;

	ip6cp = (struct ip6ctlparam *)d;
	/*
	 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
	 */
	if (ip6cp->ip6c_m == NULL) {
		return;
	}
	/*
	 * Check if we can safely examine the ports and the verification tag
	 * of the SCTP common header.
	 */
	if (ip6cp->ip6c_m->m_pkthdr.len <
	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
		return;
	}
	/* Copy out the UDP header. */
	memset(&udp, 0, sizeof(struct udphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off,
	    sizeof(struct udphdr),
	    (caddr_t)&udp);
	/*
	 * Copy out the port numbers and the verification tag, i.e. the
	 * first 8 bytes of the SCTP common header (everything before the
	 * checksum field).
	 */
	memset(&sh, 0, sizeof(struct sctphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off + sizeof(struct udphdr),
	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
	    (caddr_t)&sh);
	/* Build sockaddrs from the quoted packet; embed the scope zone. */
	memset(&src, 0, sizeof(struct sockaddr_in6));
	src.sin6_family = AF_INET6;
	src.sin6_len = sizeof(struct sockaddr_in6);
	src.sin6_port = sh.src_port;
	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	memset(&dst, 0, sizeof(struct sockaddr_in6));
	dst.sin6_family = AF_INET6;
	dst.sin6_len = sizeof(struct sockaddr_in6);
	dst.sin6_port = sh.dest_port;
	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	/*
	 * 'dst' is the destination of the packet that failed to be sent,
	 * 'src' is our local endpoint address; hence they are reversed in
	 * the lookup. On success the stcb is returned locked.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp.uh_dport != net->port) ||
		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh.v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			/*
			 * A zero verification tag is only legitimate on an
			 * INIT chunk; validate chunk type and initiate tag
			 * if enough of the packet was quoted.
			 */
			if (ip6cp->ip6c_m->m_pkthdr.len >=
			    ip6cp->ip6c_off + sizeof(struct udphdr) +
			    sizeof(struct sctphdr) +
			    sizeof(struct sctp_chunkhdr) +
			    offsetof(struct sctp_init, a_rwnd)) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				uint32_t initiate_tag;
				uint8_t chunk_type;

				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr),
				    sizeof(uint8_t),
				    (caddr_t)&chunk_type);
				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr) +
				    sizeof(struct sctp_chunkhdr),
				    sizeof(uint32_t),
				    (caddr_t)&initiate_tag);
				if ((chunk_type != SCTP_INITIATION) ||
				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				/* Quote too short to validate; ignore. */
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = ip6cp->ip6c_icmp6->icmp6_type;
		code = ip6cp->ip6c_icmp6->icmp6_code;
		/*
		 * "Port unreachable" refers to the UDP encapsulation port;
		 * translate it to a next-header parameter problem, the
		 * closest analogue for plain SCTP.
		 */
		if ((type == ICMP6_DST_UNREACH) &&
		    (code == ICMP6_DST_UNREACH_NOPORT)) {
			type = ICMP6_PARAM_PROB;
			code = ICMP6_PARAMPROB_NEXTHEADER;
		}
		/* sctp6_notify() consumes the TCB lock. */
		sctp6_notify(inp, stcb, net, type, code,
		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce inp's ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
}
7102 #endif
7103 
void
sctp_over_udp_stop(void)
{
	/*
	 * Tear down the kernel UDP tunneling sockets, if any exist. This
	 * function assumes the sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	struct socket *so;

#ifdef INET
	so = SCTP_BASE_INFO(udp4_tun_socket);
	if (so != NULL) {
		soclose(so);
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	so = SCTP_BASE_INFO(udp6_tun_socket);
	if (so != NULL) {
		soclose(so);
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
7124 
7125 int
7126 sctp_over_udp_start(void)
7127 {
7128 	uint16_t port;
7129 	int ret;
7130 #ifdef INET
7131 	struct sockaddr_in sin;
7132 #endif
7133 #ifdef INET6
7134 	struct sockaddr_in6 sin6;
7135 #endif
7136 	/*
7137 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7138 	 * for writting!
7139 	 */
7140 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7141 	if (ntohs(port) == 0) {
7142 		/* Must have a port set */
7143 		return (EINVAL);
7144 	}
7145 #ifdef INET
7146 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7147 		/* Already running -- must stop first */
7148 		return (EALREADY);
7149 	}
7150 #endif
7151 #ifdef INET6
7152 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7153 		/* Already running -- must stop first */
7154 		return (EALREADY);
7155 	}
7156 #endif
7157 #ifdef INET
7158 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7159 	    SOCK_DGRAM, IPPROTO_UDP,
7160 	    curthread->td_ucred, curthread))) {
7161 		sctp_over_udp_stop();
7162 		return (ret);
7163 	}
7164 	/* Call the special UDP hook. */
7165 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7166 	    sctp_recv_udp_tunneled_packet,
7167 	    sctp_recv_icmp_tunneled_packet,
7168 	    NULL))) {
7169 		sctp_over_udp_stop();
7170 		return (ret);
7171 	}
7172 	/* Ok, we have a socket, bind it to the port. */
7173 	memset(&sin, 0, sizeof(struct sockaddr_in));
7174 	sin.sin_len = sizeof(struct sockaddr_in);
7175 	sin.sin_family = AF_INET;
7176 	sin.sin_port = htons(port);
7177 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7178 	    (struct sockaddr *)&sin, curthread))) {
7179 		sctp_over_udp_stop();
7180 		return (ret);
7181 	}
7182 #endif
7183 #ifdef INET6
7184 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7185 	    SOCK_DGRAM, IPPROTO_UDP,
7186 	    curthread->td_ucred, curthread))) {
7187 		sctp_over_udp_stop();
7188 		return (ret);
7189 	}
7190 	/* Call the special UDP hook. */
7191 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7192 	    sctp_recv_udp_tunneled_packet,
7193 	    sctp_recv_icmp6_tunneled_packet,
7194 	    NULL))) {
7195 		sctp_over_udp_stop();
7196 		return (ret);
7197 	}
7198 	/* Ok, we have a socket, bind it to the port. */
7199 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7200 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7201 	sin6.sin6_family = AF_INET6;
7202 	sin6.sin6_port = htons(port);
7203 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7204 	    (struct sockaddr *)&sin6, curthread))) {
7205 		sctp_over_udp_stop();
7206 		return (ret);
7207 	}
7208 #endif
7209 	return (0);
7210 }
7211 
7212 /*
 * sctp_min_mtu() returns the minimum of all non-zero arguments.
7214  * If all arguments are zero, zero is returned.
7215  */
/*
 * Return the smallest of the non-zero arguments; a zero argument means
 * "no value". If all three arguments are zero, zero is returned.
 */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	uint32_t smallest;

	/* Start with "no value" and fold in each non-zero argument. */
	smallest = 0;
	if (mtu1 > 0) {
		smallest = mtu1;
	}
	if ((mtu2 > 0) && ((smallest == 0) || (mtu2 < smallest))) {
		smallest = mtu2;
	}
	if ((mtu3 > 0) && ((smallest == 0) || (mtu3 < smallest))) {
		smallest = mtu3;
	}
	return (smallest);
}
7245 
7246 void
7247 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7248 {
7249 	struct in_conninfo inc;
7250 
7251 	memset(&inc, 0, sizeof(struct in_conninfo));
7252 	inc.inc_fibnum = fibnum;
7253 	switch (addr->sa.sa_family) {
7254 #ifdef INET
7255 	case AF_INET:
7256 		inc.inc_faddr = addr->sin.sin_addr;
7257 		break;
7258 #endif
7259 #ifdef INET6
7260 	case AF_INET6:
7261 		inc.inc_flags |= INC_ISIPV6;
7262 		inc.inc6_faddr = addr->sin6.sin6_addr;
7263 		break;
7264 #endif
7265 	default:
7266 		return;
7267 	}
7268 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7269 }
7270 
7271 uint32_t
7272 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7273 {
7274 	struct in_conninfo inc;
7275 
7276 	memset(&inc, 0, sizeof(struct in_conninfo));
7277 	inc.inc_fibnum = fibnum;
7278 	switch (addr->sa.sa_family) {
7279 #ifdef INET
7280 	case AF_INET:
7281 		inc.inc_faddr = addr->sin.sin_addr;
7282 		break;
7283 #endif
7284 #ifdef INET6
7285 	case AF_INET6:
7286 		inc.inc_flags |= INC_ISIPV6;
7287 		inc.inc6_faddr = addr->sin6.sin6_addr;
7288 		break;
7289 #endif
7290 	default:
7291 		return (0);
7292 	}
7293 	return ((uint32_t)tcp_hc_getmtu(&inc));
7294 }
7295