xref: /freebsd/sys/netinet/sctputil.c (revision ca987d4641cdcd7f27e153db17c5bf064934faf5)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #if defined(INET6) || defined(INET)
53 #include <netinet/tcp_var.h>
54 #endif
55 #include <netinet/udp.h>
56 #include <netinet/udp_var.h>
57 #include <sys/proc.h>
58 #ifdef INET6
59 #include <netinet/icmp6.h>
60 #endif
61 
62 
63 #ifndef KTR_SCTP
64 #define KTR_SCTP KTR_SUBSYS
65 #endif
66 
67 extern const struct sctp_cc_functions sctp_cc_functions[];
68 extern const struct sctp_ss_functions sctp_ss_functions[];
69 
70 void
71 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
72 {
73 	struct sctp_cwnd_log sctp_clog;
74 
75 	sctp_clog.x.sb.stcb = stcb;
76 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
77 	if (stcb)
78 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
79 	else
80 		sctp_clog.x.sb.stcb_sbcc = 0;
81 	sctp_clog.x.sb.incr = incr;
82 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
83 	    SCTP_LOG_EVENT_SB,
84 	    from,
85 	    sctp_clog.x.misc.log1,
86 	    sctp_clog.x.misc.log2,
87 	    sctp_clog.x.misc.log3,
88 	    sctp_clog.x.misc.log4);
89 }
90 
91 void
92 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
93 {
94 	struct sctp_cwnd_log sctp_clog;
95 
96 	sctp_clog.x.close.inp = (void *)inp;
97 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
98 	if (stcb) {
99 		sctp_clog.x.close.stcb = (void *)stcb;
100 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
101 	} else {
102 		sctp_clog.x.close.stcb = 0;
103 		sctp_clog.x.close.state = 0;
104 	}
105 	sctp_clog.x.close.loc = loc;
106 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
107 	    SCTP_LOG_EVENT_CLOSE,
108 	    0,
109 	    sctp_clog.x.misc.log1,
110 	    sctp_clog.x.misc.log2,
111 	    sctp_clog.x.misc.log3,
112 	    sctp_clog.x.misc.log4);
113 }
114 
115 void
116 rto_logging(struct sctp_nets *net, int from)
117 {
118 	struct sctp_cwnd_log sctp_clog;
119 
120 	memset(&sctp_clog, 0, sizeof(sctp_clog));
121 	sctp_clog.x.rto.net = (void *)net;
122 	sctp_clog.x.rto.rtt = net->rtt / 1000;
123 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
124 	    SCTP_LOG_EVENT_RTT,
125 	    from,
126 	    sctp_clog.x.misc.log1,
127 	    sctp_clog.x.misc.log2,
128 	    sctp_clog.x.misc.log3,
129 	    sctp_clog.x.misc.log4);
130 }
131 
132 void
133 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
134 {
135 	struct sctp_cwnd_log sctp_clog;
136 
137 	sctp_clog.x.strlog.stcb = stcb;
138 	sctp_clog.x.strlog.n_tsn = tsn;
139 	sctp_clog.x.strlog.n_sseq = sseq;
140 	sctp_clog.x.strlog.e_tsn = 0;
141 	sctp_clog.x.strlog.e_sseq = 0;
142 	sctp_clog.x.strlog.strm = stream;
143 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
144 	    SCTP_LOG_EVENT_STRM,
145 	    from,
146 	    sctp_clog.x.misc.log1,
147 	    sctp_clog.x.misc.log2,
148 	    sctp_clog.x.misc.log3,
149 	    sctp_clog.x.misc.log4);
150 }
151 
152 void
153 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
154 {
155 	struct sctp_cwnd_log sctp_clog;
156 
157 	sctp_clog.x.nagle.stcb = (void *)stcb;
158 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
159 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
160 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
161 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
162 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
163 	    SCTP_LOG_EVENT_NAGLE,
164 	    action,
165 	    sctp_clog.x.misc.log1,
166 	    sctp_clog.x.misc.log2,
167 	    sctp_clog.x.misc.log3,
168 	    sctp_clog.x.misc.log4);
169 }
170 
171 void
172 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
173 {
174 	struct sctp_cwnd_log sctp_clog;
175 
176 	sctp_clog.x.sack.cumack = cumack;
177 	sctp_clog.x.sack.oldcumack = old_cumack;
178 	sctp_clog.x.sack.tsn = tsn;
179 	sctp_clog.x.sack.numGaps = gaps;
180 	sctp_clog.x.sack.numDups = dups;
181 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
182 	    SCTP_LOG_EVENT_SACK,
183 	    from,
184 	    sctp_clog.x.misc.log1,
185 	    sctp_clog.x.misc.log2,
186 	    sctp_clog.x.misc.log3,
187 	    sctp_clog.x.misc.log4);
188 }
189 
190 void
191 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
192 {
193 	struct sctp_cwnd_log sctp_clog;
194 
195 	memset(&sctp_clog, 0, sizeof(sctp_clog));
196 	sctp_clog.x.map.base = map;
197 	sctp_clog.x.map.cum = cum;
198 	sctp_clog.x.map.high = high;
199 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
200 	    SCTP_LOG_EVENT_MAP,
201 	    from,
202 	    sctp_clog.x.misc.log1,
203 	    sctp_clog.x.misc.log2,
204 	    sctp_clog.x.misc.log3,
205 	    sctp_clog.x.misc.log4);
206 }
207 
208 void
209 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
210 {
211 	struct sctp_cwnd_log sctp_clog;
212 
213 	memset(&sctp_clog, 0, sizeof(sctp_clog));
214 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
215 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
216 	sctp_clog.x.fr.tsn = tsn;
217 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
218 	    SCTP_LOG_EVENT_FR,
219 	    from,
220 	    sctp_clog.x.misc.log1,
221 	    sctp_clog.x.misc.log2,
222 	    sctp_clog.x.misc.log3,
223 	    sctp_clog.x.misc.log4);
224 }
225 
226 #ifdef SCTP_MBUF_LOGGING
227 void
228 sctp_log_mb(struct mbuf *m, int from)
229 {
230 	struct sctp_cwnd_log sctp_clog;
231 
232 	sctp_clog.x.mb.mp = m;
233 	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
234 	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
235 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
236 	if (SCTP_BUF_IS_EXTENDED(m)) {
237 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
238 		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
239 	} else {
240 		sctp_clog.x.mb.ext = 0;
241 		sctp_clog.x.mb.refcnt = 0;
242 	}
243 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
244 	    SCTP_LOG_EVENT_MBUF,
245 	    from,
246 	    sctp_clog.x.misc.log1,
247 	    sctp_clog.x.misc.log2,
248 	    sctp_clog.x.misc.log3,
249 	    sctp_clog.x.misc.log4);
250 }
251 
252 void
253 sctp_log_mbc(struct mbuf *m, int from)
254 {
255 	struct mbuf *mat;
256 
257 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
258 		sctp_log_mb(mat, from);
259 	}
260 }
261 #endif
262 
263 void
264 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
265 {
266 	struct sctp_cwnd_log sctp_clog;
267 
268 	if (control == NULL) {
269 		SCTP_PRINTF("Gak log of NULL?\n");
270 		return;
271 	}
272 	sctp_clog.x.strlog.stcb = control->stcb;
273 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
274 	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
275 	sctp_clog.x.strlog.strm = control->sinfo_stream;
276 	if (poschk != NULL) {
277 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
278 		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
279 	} else {
280 		sctp_clog.x.strlog.e_tsn = 0;
281 		sctp_clog.x.strlog.e_sseq = 0;
282 	}
283 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
284 	    SCTP_LOG_EVENT_STRM,
285 	    from,
286 	    sctp_clog.x.misc.log1,
287 	    sctp_clog.x.misc.log2,
288 	    sctp_clog.x.misc.log3,
289 	    sctp_clog.x.misc.log4);
290 }
291 
292 void
293 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
294 {
295 	struct sctp_cwnd_log sctp_clog;
296 
297 	sctp_clog.x.cwnd.net = net;
298 	if (stcb->asoc.send_queue_cnt > 255)
299 		sctp_clog.x.cwnd.cnt_in_send = 255;
300 	else
301 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
302 	if (stcb->asoc.stream_queue_cnt > 255)
303 		sctp_clog.x.cwnd.cnt_in_str = 255;
304 	else
305 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
306 
307 	if (net) {
308 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
309 		sctp_clog.x.cwnd.inflight = net->flight_size;
310 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
311 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
312 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
313 	}
314 	if (SCTP_CWNDLOG_PRESEND == from) {
315 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
316 	}
317 	sctp_clog.x.cwnd.cwnd_augment = augment;
318 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
319 	    SCTP_LOG_EVENT_CWND,
320 	    from,
321 	    sctp_clog.x.misc.log1,
322 	    sctp_clog.x.misc.log2,
323 	    sctp_clog.x.misc.log3,
324 	    sctp_clog.x.misc.log4);
325 }
326 
327 void
328 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
329 {
330 	struct sctp_cwnd_log sctp_clog;
331 
332 	memset(&sctp_clog, 0, sizeof(sctp_clog));
333 	if (inp) {
334 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
335 
336 	} else {
337 		sctp_clog.x.lock.sock = (void *)NULL;
338 	}
339 	sctp_clog.x.lock.inp = (void *)inp;
340 	if (stcb) {
341 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
342 	} else {
343 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
344 	}
345 	if (inp) {
346 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
347 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
348 	} else {
349 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
350 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
351 	}
352 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
353 	if (inp && (inp->sctp_socket)) {
354 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
355 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
356 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
357 	} else {
358 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
359 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
360 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
361 	}
362 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
363 	    SCTP_LOG_LOCK_EVENT,
364 	    from,
365 	    sctp_clog.x.misc.log1,
366 	    sctp_clog.x.misc.log2,
367 	    sctp_clog.x.misc.log3,
368 	    sctp_clog.x.misc.log4);
369 }
370 
371 void
372 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
373 {
374 	struct sctp_cwnd_log sctp_clog;
375 
376 	memset(&sctp_clog, 0, sizeof(sctp_clog));
377 	sctp_clog.x.cwnd.net = net;
378 	sctp_clog.x.cwnd.cwnd_new_value = error;
379 	sctp_clog.x.cwnd.inflight = net->flight_size;
380 	sctp_clog.x.cwnd.cwnd_augment = burst;
381 	if (stcb->asoc.send_queue_cnt > 255)
382 		sctp_clog.x.cwnd.cnt_in_send = 255;
383 	else
384 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
385 	if (stcb->asoc.stream_queue_cnt > 255)
386 		sctp_clog.x.cwnd.cnt_in_str = 255;
387 	else
388 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
389 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
390 	    SCTP_LOG_EVENT_MAXBURST,
391 	    from,
392 	    sctp_clog.x.misc.log1,
393 	    sctp_clog.x.misc.log2,
394 	    sctp_clog.x.misc.log3,
395 	    sctp_clog.x.misc.log4);
396 }
397 
398 void
399 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
400 {
401 	struct sctp_cwnd_log sctp_clog;
402 
403 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
404 	sctp_clog.x.rwnd.send_size = snd_size;
405 	sctp_clog.x.rwnd.overhead = overhead;
406 	sctp_clog.x.rwnd.new_rwnd = 0;
407 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
408 	    SCTP_LOG_EVENT_RWND,
409 	    from,
410 	    sctp_clog.x.misc.log1,
411 	    sctp_clog.x.misc.log2,
412 	    sctp_clog.x.misc.log3,
413 	    sctp_clog.x.misc.log4);
414 }
415 
416 void
417 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
418 {
419 	struct sctp_cwnd_log sctp_clog;
420 
421 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
422 	sctp_clog.x.rwnd.send_size = flight_size;
423 	sctp_clog.x.rwnd.overhead = overhead;
424 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
425 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
426 	    SCTP_LOG_EVENT_RWND,
427 	    from,
428 	    sctp_clog.x.misc.log1,
429 	    sctp_clog.x.misc.log2,
430 	    sctp_clog.x.misc.log3,
431 	    sctp_clog.x.misc.log4);
432 }
433 
434 #ifdef SCTP_MBCNT_LOGGING
435 static void
436 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
437 {
438 	struct sctp_cwnd_log sctp_clog;
439 
440 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
441 	sctp_clog.x.mbcnt.size_change = book;
442 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
443 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
444 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
445 	    SCTP_LOG_EVENT_MBCNT,
446 	    from,
447 	    sctp_clog.x.misc.log1,
448 	    sctp_clog.x.misc.log2,
449 	    sctp_clog.x.misc.log3,
450 	    sctp_clog.x.misc.log4);
451 }
452 #endif
453 
454 void
455 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
456 {
457 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
458 	    SCTP_LOG_MISC_EVENT,
459 	    from,
460 	    a, b, c, d);
461 }
462 
463 void
464 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
465 {
466 	struct sctp_cwnd_log sctp_clog;
467 
468 	sctp_clog.x.wake.stcb = (void *)stcb;
469 	sctp_clog.x.wake.wake_cnt = wake_cnt;
470 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
471 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
472 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
473 
474 	if (stcb->asoc.stream_queue_cnt < 0xff)
475 		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
476 	else
477 		sctp_clog.x.wake.stream_qcnt = 0xff;
478 
479 	if (stcb->asoc.chunks_on_out_queue < 0xff)
480 		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
481 	else
482 		sctp_clog.x.wake.chunks_on_oque = 0xff;
483 
484 	sctp_clog.x.wake.sctpflags = 0;
485 	/* set in the defered mode stuff */
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
487 		sctp_clog.x.wake.sctpflags |= 1;
488 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
489 		sctp_clog.x.wake.sctpflags |= 2;
490 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
491 		sctp_clog.x.wake.sctpflags |= 4;
492 	/* what about the sb */
493 	if (stcb->sctp_socket) {
494 		struct socket *so = stcb->sctp_socket;
495 
496 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
497 	} else {
498 		sctp_clog.x.wake.sbflags = 0xff;
499 	}
500 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
501 	    SCTP_LOG_EVENT_WAKE,
502 	    from,
503 	    sctp_clog.x.misc.log1,
504 	    sctp_clog.x.misc.log2,
505 	    sctp_clog.x.misc.log3,
506 	    sctp_clog.x.misc.log4);
507 }
508 
509 void
510 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
511 {
512 	struct sctp_cwnd_log sctp_clog;
513 
514 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
515 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
516 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
517 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
518 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
519 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
520 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
521 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
522 	    SCTP_LOG_EVENT_BLOCK,
523 	    from,
524 	    sctp_clog.x.misc.log1,
525 	    sctp_clog.x.misc.log2,
526 	    sctp_clog.x.misc.log3,
527 	    sctp_clog.x.misc.log4);
528 }
529 
530 int
531 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
532 {
533 	/* May need to fix this if ktrdump does not work */
534 	return (0);
535 }
536 
537 #ifdef SCTP_AUDITING_ENABLED
538 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
539 static int sctp_audit_indx = 0;
540 
541 static
542 void
543 sctp_print_audit_report(void)
544 {
545 	int i;
546 	int cnt;
547 
548 	cnt = 0;
549 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
550 		if ((sctp_audit_data[i][0] == 0xe0) &&
551 		    (sctp_audit_data[i][1] == 0x01)) {
552 			cnt = 0;
553 			SCTP_PRINTF("\n");
554 		} else if (sctp_audit_data[i][0] == 0xf0) {
555 			cnt = 0;
556 			SCTP_PRINTF("\n");
557 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
558 		    (sctp_audit_data[i][1] == 0x01)) {
559 			SCTP_PRINTF("\n");
560 			cnt = 0;
561 		}
562 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
563 		    (uint32_t)sctp_audit_data[i][1]);
564 		cnt++;
565 		if ((cnt % 14) == 0)
566 			SCTP_PRINTF("\n");
567 	}
568 	for (i = 0; i < sctp_audit_indx; i++) {
569 		if ((sctp_audit_data[i][0] == 0xe0) &&
570 		    (sctp_audit_data[i][1] == 0x01)) {
571 			cnt = 0;
572 			SCTP_PRINTF("\n");
573 		} else if (sctp_audit_data[i][0] == 0xf0) {
574 			cnt = 0;
575 			SCTP_PRINTF("\n");
576 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
577 		    (sctp_audit_data[i][1] == 0x01)) {
578 			SCTP_PRINTF("\n");
579 			cnt = 0;
580 		}
581 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
582 		    (uint32_t)sctp_audit_data[i][1]);
583 		cnt++;
584 		if ((cnt % 14) == 0)
585 			SCTP_PRINTF("\n");
586 	}
587 	SCTP_PRINTF("\n");
588 }
589 
590 void
591 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
592     struct sctp_nets *net)
593 {
594 	int resend_cnt, tot_out, rep, tot_book_cnt;
595 	struct sctp_nets *lnet;
596 	struct sctp_tmit_chunk *chk;
597 
598 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
599 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
600 	sctp_audit_indx++;
601 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
602 		sctp_audit_indx = 0;
603 	}
604 	if (inp == NULL) {
605 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
606 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
607 		sctp_audit_indx++;
608 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
609 			sctp_audit_indx = 0;
610 		}
611 		return;
612 	}
613 	if (stcb == NULL) {
614 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
615 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
616 		sctp_audit_indx++;
617 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
618 			sctp_audit_indx = 0;
619 		}
620 		return;
621 	}
622 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
623 	sctp_audit_data[sctp_audit_indx][1] =
624 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
625 	sctp_audit_indx++;
626 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
627 		sctp_audit_indx = 0;
628 	}
629 	rep = 0;
630 	tot_book_cnt = 0;
631 	resend_cnt = tot_out = 0;
632 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
633 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
634 			resend_cnt++;
635 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
636 			tot_out += chk->book_size;
637 			tot_book_cnt++;
638 		}
639 	}
640 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
641 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
642 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
643 		sctp_audit_indx++;
644 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
645 			sctp_audit_indx = 0;
646 		}
647 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
648 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
649 		rep = 1;
650 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
651 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
652 		sctp_audit_data[sctp_audit_indx][1] =
653 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
654 		sctp_audit_indx++;
655 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
656 			sctp_audit_indx = 0;
657 		}
658 	}
659 	if (tot_out != stcb->asoc.total_flight) {
660 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
661 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
662 		sctp_audit_indx++;
663 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
664 			sctp_audit_indx = 0;
665 		}
666 		rep = 1;
667 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
668 		    (int)stcb->asoc.total_flight);
669 		stcb->asoc.total_flight = tot_out;
670 	}
671 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
672 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
673 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
674 		sctp_audit_indx++;
675 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
676 			sctp_audit_indx = 0;
677 		}
678 		rep = 1;
679 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
680 
681 		stcb->asoc.total_flight_count = tot_book_cnt;
682 	}
683 	tot_out = 0;
684 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
685 		tot_out += lnet->flight_size;
686 	}
687 	if (tot_out != stcb->asoc.total_flight) {
688 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
689 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
690 		sctp_audit_indx++;
691 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
692 			sctp_audit_indx = 0;
693 		}
694 		rep = 1;
695 		SCTP_PRINTF("real flight:%d net total was %d\n",
696 		    stcb->asoc.total_flight, tot_out);
697 		/* now corrective action */
698 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
699 
700 			tot_out = 0;
701 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
702 				if ((chk->whoTo == lnet) &&
703 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
704 					tot_out += chk->book_size;
705 				}
706 			}
707 			if (lnet->flight_size != tot_out) {
708 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
709 				    (void *)lnet, lnet->flight_size,
710 				    tot_out);
711 				lnet->flight_size = tot_out;
712 			}
713 		}
714 	}
715 	if (rep) {
716 		sctp_print_audit_report();
717 	}
718 }
719 
720 void
721 sctp_audit_log(uint8_t ev, uint8_t fd)
722 {
723 
724 	sctp_audit_data[sctp_audit_indx][0] = ev;
725 	sctp_audit_data[sctp_audit_indx][1] = fd;
726 	sctp_audit_indx++;
727 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
728 		sctp_audit_indx = 0;
729 	}
730 }
731 
732 #endif
733 
734 /*
735  * sctp_stop_timers_for_shutdown() should be called
736  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
737  * state to make sure that all timers are stopped.
738  */
739 void
740 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
741 {
742 	struct sctp_association *asoc;
743 	struct sctp_nets *net;
744 
745 	asoc = &stcb->asoc;
746 
747 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
748 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
749 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
750 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
751 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
752 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
753 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
754 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
755 	}
756 }
757 
758 /*
759  * a list of sizes based on typical mtu's, used only if next hop size not
760  * returned.
761  */
762 static uint32_t sctp_mtu_sizes[] = {
763 	68,
764 	296,
765 	508,
766 	512,
767 	544,
768 	576,
769 	1006,
770 	1492,
771 	1500,
772 	1536,
773 	2002,
774 	2048,
775 	4352,
776 	4464,
777 	8166,
778 	17914,
779 	32000,
780 	65535
781 };
782 
783 /*
784  * Return the largest MTU smaller than val. If there is no
785  * entry, just return val.
786  */
787 uint32_t
788 sctp_get_prev_mtu(uint32_t val)
789 {
790 	uint32_t i;
791 
792 	if (val <= sctp_mtu_sizes[0]) {
793 		return (val);
794 	}
795 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
796 		if (val <= sctp_mtu_sizes[i]) {
797 			break;
798 		}
799 	}
800 	return (sctp_mtu_sizes[i - 1]);
801 }
802 
803 /*
804  * Return the smallest MTU larger than val. If there is no
805  * entry, just return val.
806  */
807 uint32_t
808 sctp_get_next_mtu(uint32_t val)
809 {
810 	/* select another MTU that is just bigger than this one */
811 	uint32_t i;
812 
813 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
814 		if (val < sctp_mtu_sizes[i]) {
815 			return (sctp_mtu_sizes[i]);
816 		}
817 	}
818 	return (val);
819 }
820 
821 void
822 sctp_fill_random_store(struct sctp_pcb *m)
823 {
824 	/*
825 	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
826 	 * our counter. The result becomes our good random numbers and we
827 	 * then setup to give these out. Note that we do no locking to
828 	 * protect this. This is ok, since if competing folks call this we
829 	 * will get more gobbled gook in the random store which is what we
830 	 * want. There is a danger that two guys will use the same random
831 	 * numbers, but thats ok too since that is random as well :->
832 	 */
833 	m->store_at = 0;
834 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
835 	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
836 	    sizeof(m->random_counter), (uint8_t *)m->random_store);
837 	m->random_counter++;
838 }
839 
840 uint32_t
841 sctp_select_initial_TSN(struct sctp_pcb *inp)
842 {
843 	/*
844 	 * A true implementation should use random selection process to get
845 	 * the initial stream sequence number, using RFC1750 as a good
846 	 * guideline
847 	 */
848 	uint32_t x, *xp;
849 	uint8_t *p;
850 	int store_at, new_store;
851 
852 	if (inp->initial_sequence_debug != 0) {
853 		uint32_t ret;
854 
855 		ret = inp->initial_sequence_debug;
856 		inp->initial_sequence_debug++;
857 		return (ret);
858 	}
859 retry:
860 	store_at = inp->store_at;
861 	new_store = store_at + sizeof(uint32_t);
862 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
863 		new_store = 0;
864 	}
865 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
866 		goto retry;
867 	}
868 	if (new_store == 0) {
869 		/* Refill the random store */
870 		sctp_fill_random_store(inp);
871 	}
872 	p = &inp->random_store[store_at];
873 	xp = (uint32_t *)p;
874 	x = *xp;
875 	return (x);
876 }
877 
878 uint32_t
879 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
880 {
881 	uint32_t x;
882 	struct timeval now;
883 
884 	if (check) {
885 		(void)SCTP_GETTIME_TIMEVAL(&now);
886 	}
887 	for (;;) {
888 		x = sctp_select_initial_TSN(&inp->sctp_ep);
889 		if (x == 0) {
890 			/* we never use 0 */
891 			continue;
892 		}
893 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
894 			break;
895 		}
896 	}
897 	return (x);
898 }
899 
900 int32_t
901 sctp_map_assoc_state(int kernel_state)
902 {
903 	int32_t user_state;
904 
905 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
906 		user_state = SCTP_CLOSED;
907 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
908 		user_state = SCTP_SHUTDOWN_PENDING;
909 	} else {
910 		switch (kernel_state & SCTP_STATE_MASK) {
911 		case SCTP_STATE_EMPTY:
912 			user_state = SCTP_CLOSED;
913 			break;
914 		case SCTP_STATE_INUSE:
915 			user_state = SCTP_CLOSED;
916 			break;
917 		case SCTP_STATE_COOKIE_WAIT:
918 			user_state = SCTP_COOKIE_WAIT;
919 			break;
920 		case SCTP_STATE_COOKIE_ECHOED:
921 			user_state = SCTP_COOKIE_ECHOED;
922 			break;
923 		case SCTP_STATE_OPEN:
924 			user_state = SCTP_ESTABLISHED;
925 			break;
926 		case SCTP_STATE_SHUTDOWN_SENT:
927 			user_state = SCTP_SHUTDOWN_SENT;
928 			break;
929 		case SCTP_STATE_SHUTDOWN_RECEIVED:
930 			user_state = SCTP_SHUTDOWN_RECEIVED;
931 			break;
932 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
933 			user_state = SCTP_SHUTDOWN_ACK_SENT;
934 			break;
935 		default:
936 			user_state = SCTP_CLOSED;
937 			break;
938 		}
939 	}
940 	return (user_state);
941 }
942 
943 int
944 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
945     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
946 {
947 	struct sctp_association *asoc;
948 
949 	/*
950 	 * Anything set to zero is taken care of by the allocation routine's
951 	 * bzero
952 	 */
953 
954 	/*
955 	 * Up front select what scoping to apply on addresses I tell my peer
956 	 * Not sure what to do with these right now, we will need to come up
957 	 * with a way to set them. We may need to pass them through from the
958 	 * caller in the sctp_aloc_assoc() function.
959 	 */
960 	int i;
961 #if defined(SCTP_DETAILED_STR_STATS)
962 	int j;
963 #endif
964 
965 	asoc = &stcb->asoc;
966 	/* init all variables to a known value. */
967 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
968 	asoc->max_burst = inp->sctp_ep.max_burst;
969 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
970 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
971 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
972 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
973 	asoc->ecn_supported = inp->ecn_supported;
974 	asoc->prsctp_supported = inp->prsctp_supported;
975 	asoc->idata_supported = inp->idata_supported;
976 	asoc->auth_supported = inp->auth_supported;
977 	asoc->asconf_supported = inp->asconf_supported;
978 	asoc->reconfig_supported = inp->reconfig_supported;
979 	asoc->nrsack_supported = inp->nrsack_supported;
980 	asoc->pktdrop_supported = inp->pktdrop_supported;
981 	asoc->idata_supported = inp->idata_supported;
982 	asoc->sctp_cmt_pf = (uint8_t)0;
983 	asoc->sctp_frag_point = inp->sctp_frag_point;
984 	asoc->sctp_features = inp->sctp_features;
985 	asoc->default_dscp = inp->sctp_ep.default_dscp;
986 	asoc->max_cwnd = inp->max_cwnd;
987 #ifdef INET6
988 	if (inp->sctp_ep.default_flowlabel) {
989 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
990 	} else {
991 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
992 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
993 			asoc->default_flowlabel &= 0x000fffff;
994 			asoc->default_flowlabel |= 0x80000000;
995 		} else {
996 			asoc->default_flowlabel = 0;
997 		}
998 	}
999 #endif
1000 	asoc->sb_send_resv = 0;
1001 	if (override_tag) {
1002 		asoc->my_vtag = override_tag;
1003 	} else {
1004 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1005 	}
1006 	/* Get the nonce tags */
1007 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1008 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1009 	asoc->vrf_id = vrf_id;
1010 
1011 #ifdef SCTP_ASOCLOG_OF_TSNS
1012 	asoc->tsn_in_at = 0;
1013 	asoc->tsn_out_at = 0;
1014 	asoc->tsn_in_wrapped = 0;
1015 	asoc->tsn_out_wrapped = 0;
1016 	asoc->cumack_log_at = 0;
1017 	asoc->cumack_log_atsnt = 0;
1018 #endif
1019 #ifdef SCTP_FS_SPEC_LOG
1020 	asoc->fs_index = 0;
1021 #endif
1022 	asoc->refcnt = 0;
1023 	asoc->assoc_up_sent = 0;
1024 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1025 	    sctp_select_initial_TSN(&inp->sctp_ep);
1026 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1027 	/* we are optimisitic here */
1028 	asoc->peer_supports_nat = 0;
1029 	asoc->sent_queue_retran_cnt = 0;
1030 
1031 	/* for CMT */
1032 	asoc->last_net_cmt_send_started = NULL;
1033 
1034 	/* This will need to be adjusted */
1035 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1036 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1037 	asoc->asconf_seq_in = asoc->last_acked_seq;
1038 
1039 	/* here we are different, we hold the next one we expect */
1040 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1041 
1042 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1043 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1044 
1045 	asoc->default_mtu = inp->sctp_ep.default_mtu;
1046 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1047 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1048 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1049 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1050 	asoc->free_chunk_cnt = 0;
1051 
1052 	asoc->iam_blocking = 0;
1053 	asoc->context = inp->sctp_context;
1054 	asoc->local_strreset_support = inp->local_strreset_support;
1055 	asoc->def_send = inp->def_send;
1056 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1057 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1058 	asoc->pr_sctp_cnt = 0;
1059 	asoc->total_output_queue_size = 0;
1060 
1061 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1062 		asoc->scope.ipv6_addr_legal = 1;
1063 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1064 			asoc->scope.ipv4_addr_legal = 1;
1065 		} else {
1066 			asoc->scope.ipv4_addr_legal = 0;
1067 		}
1068 	} else {
1069 		asoc->scope.ipv6_addr_legal = 0;
1070 		asoc->scope.ipv4_addr_legal = 1;
1071 	}
1072 
1073 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1074 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1075 
1076 	asoc->smallest_mtu = inp->sctp_frag_point;
1077 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1078 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1079 
1080 	asoc->stream_locked_on = 0;
1081 	asoc->ecn_echo_cnt_onq = 0;
1082 	asoc->stream_locked = 0;
1083 
1084 	asoc->send_sack = 1;
1085 
1086 	LIST_INIT(&asoc->sctp_restricted_addrs);
1087 
1088 	TAILQ_INIT(&asoc->nets);
1089 	TAILQ_INIT(&asoc->pending_reply_queue);
1090 	TAILQ_INIT(&asoc->asconf_ack_sent);
1091 	/* Setup to fill the hb random cache at first HB */
1092 	asoc->hb_random_idx = 4;
1093 
1094 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1095 
1096 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1097 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1098 
1099 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1100 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1101 
1102 	/*
1103 	 * Now the stream parameters, here we allocate space for all streams
1104 	 * that we request by default.
1105 	 */
1106 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1107 	    o_strms;
1108 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1109 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1110 	    SCTP_M_STRMO);
1111 	if (asoc->strmout == NULL) {
1112 		/* big trouble no memory */
1113 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1114 		return (ENOMEM);
1115 	}
1116 	for (i = 0; i < asoc->streamoutcnt; i++) {
1117 		/*
1118 		 * inbound side must be set to 0xffff, also NOTE when we get
1119 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1120 		 * count (streamoutcnt) but first check if we sent to any of
1121 		 * the upper streams that were dropped (if some were). Those
1122 		 * that were dropped must be notified to the upper layer as
1123 		 * failed to send.
1124 		 */
1125 		asoc->strmout[i].next_mid_ordered = 0;
1126 		asoc->strmout[i].next_mid_unordered = 0;
1127 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1128 		asoc->strmout[i].chunks_on_queues = 0;
1129 #if defined(SCTP_DETAILED_STR_STATS)
1130 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1131 			asoc->strmout[i].abandoned_sent[j] = 0;
1132 			asoc->strmout[i].abandoned_unsent[j] = 0;
1133 		}
1134 #else
1135 		asoc->strmout[i].abandoned_sent[0] = 0;
1136 		asoc->strmout[i].abandoned_unsent[0] = 0;
1137 #endif
1138 		asoc->strmout[i].sid = i;
1139 		asoc->strmout[i].last_msg_incomplete = 0;
1140 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1141 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1142 	}
1143 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1144 
1145 	/* Now the mapping array */
1146 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1147 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1148 	    SCTP_M_MAP);
1149 	if (asoc->mapping_array == NULL) {
1150 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1151 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1152 		return (ENOMEM);
1153 	}
1154 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1155 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1156 	    SCTP_M_MAP);
1157 	if (asoc->nr_mapping_array == NULL) {
1158 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1159 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1160 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1161 		return (ENOMEM);
1162 	}
1163 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1164 
1165 	/* Now the init of the other outqueues */
1166 	TAILQ_INIT(&asoc->free_chunks);
1167 	TAILQ_INIT(&asoc->control_send_queue);
1168 	TAILQ_INIT(&asoc->asconf_send_queue);
1169 	TAILQ_INIT(&asoc->send_queue);
1170 	TAILQ_INIT(&asoc->sent_queue);
1171 	TAILQ_INIT(&asoc->resetHead);
1172 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1173 	TAILQ_INIT(&asoc->asconf_queue);
1174 	/* authentication fields */
1175 	asoc->authinfo.random = NULL;
1176 	asoc->authinfo.active_keyid = 0;
1177 	asoc->authinfo.assoc_key = NULL;
1178 	asoc->authinfo.assoc_keyid = 0;
1179 	asoc->authinfo.recv_key = NULL;
1180 	asoc->authinfo.recv_keyid = 0;
1181 	LIST_INIT(&asoc->shared_keys);
1182 	asoc->marked_retrans = 0;
1183 	asoc->port = inp->sctp_ep.port;
1184 	asoc->timoinit = 0;
1185 	asoc->timodata = 0;
1186 	asoc->timosack = 0;
1187 	asoc->timoshutdown = 0;
1188 	asoc->timoheartbeat = 0;
1189 	asoc->timocookie = 0;
1190 	asoc->timoshutdownack = 0;
1191 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1192 	asoc->discontinuity_time = asoc->start_time;
1193 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1194 		asoc->abandoned_unsent[i] = 0;
1195 		asoc->abandoned_sent[i] = 0;
1196 	}
1197 	/*
1198 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1199 	 * freed later when the association is freed.
1200 	 */
1201 	return (0);
1202 }
1203 
1204 void
1205 sctp_print_mapping_array(struct sctp_association *asoc)
1206 {
1207 	unsigned int i, limit;
1208 
1209 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1210 	    asoc->mapping_array_size,
1211 	    asoc->mapping_array_base_tsn,
1212 	    asoc->cumulative_tsn,
1213 	    asoc->highest_tsn_inside_map,
1214 	    asoc->highest_tsn_inside_nr_map);
1215 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1216 		if (asoc->mapping_array[limit - 1] != 0) {
1217 			break;
1218 		}
1219 	}
1220 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1221 	for (i = 0; i < limit; i++) {
1222 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1223 	}
1224 	if (limit % 16)
1225 		SCTP_PRINTF("\n");
1226 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1227 		if (asoc->nr_mapping_array[limit - 1]) {
1228 			break;
1229 		}
1230 	}
1231 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1232 	for (i = 0; i < limit; i++) {
1233 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1234 	}
1235 	if (limit % 16)
1236 		SCTP_PRINTF("\n");
1237 }
1238 
1239 int
1240 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1241 {
1242 	/* mapping array needs to grow */
1243 	uint8_t *new_array1, *new_array2;
1244 	uint32_t new_size;
1245 
1246 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1247 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1248 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1249 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1250 		/* can't get more, forget it */
1251 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1252 		if (new_array1) {
1253 			SCTP_FREE(new_array1, SCTP_M_MAP);
1254 		}
1255 		if (new_array2) {
1256 			SCTP_FREE(new_array2, SCTP_M_MAP);
1257 		}
1258 		return (-1);
1259 	}
1260 	memset(new_array1, 0, new_size);
1261 	memset(new_array2, 0, new_size);
1262 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1263 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1264 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1265 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1266 	asoc->mapping_array = new_array1;
1267 	asoc->nr_mapping_array = new_array2;
1268 	asoc->mapping_array_size = new_size;
1269 	return (0);
1270 }
1271 
1272 
1273 static void
1274 sctp_iterator_work(struct sctp_iterator *it)
1275 {
1276 	int iteration_count = 0;
1277 	int inp_skip = 0;
1278 	int first_in = 1;
1279 	struct sctp_inpcb *tinp;
1280 
1281 	SCTP_INP_INFO_RLOCK();
1282 	SCTP_ITERATOR_LOCK();
1283 	sctp_it_ctl.cur_it = it;
1284 	if (it->inp) {
1285 		SCTP_INP_RLOCK(it->inp);
1286 		SCTP_INP_DECR_REF(it->inp);
1287 	}
1288 	if (it->inp == NULL) {
1289 		/* iterator is complete */
1290 done_with_iterator:
1291 		sctp_it_ctl.cur_it = NULL;
1292 		SCTP_ITERATOR_UNLOCK();
1293 		SCTP_INP_INFO_RUNLOCK();
1294 		if (it->function_atend != NULL) {
1295 			(*it->function_atend) (it->pointer, it->val);
1296 		}
1297 		SCTP_FREE(it, SCTP_M_ITER);
1298 		return;
1299 	}
1300 select_a_new_ep:
1301 	if (first_in) {
1302 		first_in = 0;
1303 	} else {
1304 		SCTP_INP_RLOCK(it->inp);
1305 	}
1306 	while (((it->pcb_flags) &&
1307 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1308 	    ((it->pcb_features) &&
1309 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1310 		/* endpoint flags or features don't match, so keep looking */
1311 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1312 			SCTP_INP_RUNLOCK(it->inp);
1313 			goto done_with_iterator;
1314 		}
1315 		tinp = it->inp;
1316 		it->inp = LIST_NEXT(it->inp, sctp_list);
1317 		SCTP_INP_RUNLOCK(tinp);
1318 		if (it->inp == NULL) {
1319 			goto done_with_iterator;
1320 		}
1321 		SCTP_INP_RLOCK(it->inp);
1322 	}
1323 	/* now go through each assoc which is in the desired state */
1324 	if (it->done_current_ep == 0) {
1325 		if (it->function_inp != NULL)
1326 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1327 		it->done_current_ep = 1;
1328 	}
1329 	if (it->stcb == NULL) {
1330 		/* run the per instance function */
1331 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1332 	}
1333 	if ((inp_skip) || it->stcb == NULL) {
1334 		if (it->function_inp_end != NULL) {
1335 			inp_skip = (*it->function_inp_end) (it->inp,
1336 			    it->pointer,
1337 			    it->val);
1338 		}
1339 		SCTP_INP_RUNLOCK(it->inp);
1340 		goto no_stcb;
1341 	}
1342 	while (it->stcb) {
1343 		SCTP_TCB_LOCK(it->stcb);
1344 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1345 			/* not in the right state... keep looking */
1346 			SCTP_TCB_UNLOCK(it->stcb);
1347 			goto next_assoc;
1348 		}
1349 		/* see if we have limited out the iterator loop */
1350 		iteration_count++;
1351 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1352 			/* Pause to let others grab the lock */
1353 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1354 			SCTP_TCB_UNLOCK(it->stcb);
1355 			SCTP_INP_INCR_REF(it->inp);
1356 			SCTP_INP_RUNLOCK(it->inp);
1357 			SCTP_ITERATOR_UNLOCK();
1358 			SCTP_INP_INFO_RUNLOCK();
1359 			SCTP_INP_INFO_RLOCK();
1360 			SCTP_ITERATOR_LOCK();
1361 			if (sctp_it_ctl.iterator_flags) {
1362 				/* We won't be staying here */
1363 				SCTP_INP_DECR_REF(it->inp);
1364 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1365 				if (sctp_it_ctl.iterator_flags &
1366 				    SCTP_ITERATOR_STOP_CUR_IT) {
1367 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1368 					goto done_with_iterator;
1369 				}
1370 				if (sctp_it_ctl.iterator_flags &
1371 				    SCTP_ITERATOR_STOP_CUR_INP) {
1372 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1373 					goto no_stcb;
1374 				}
1375 				/* If we reach here huh? */
1376 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1377 				    sctp_it_ctl.iterator_flags);
1378 				sctp_it_ctl.iterator_flags = 0;
1379 			}
1380 			SCTP_INP_RLOCK(it->inp);
1381 			SCTP_INP_DECR_REF(it->inp);
1382 			SCTP_TCB_LOCK(it->stcb);
1383 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1384 			iteration_count = 0;
1385 		}
1386 		/* run function on this one */
1387 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1388 
1389 		/*
1390 		 * we lie here, it really needs to have its own type but
1391 		 * first I must verify that this won't effect things :-0
1392 		 */
1393 		if (it->no_chunk_output == 0)
1394 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1395 
1396 		SCTP_TCB_UNLOCK(it->stcb);
1397 next_assoc:
1398 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1399 		if (it->stcb == NULL) {
1400 			/* Run last function */
1401 			if (it->function_inp_end != NULL) {
1402 				inp_skip = (*it->function_inp_end) (it->inp,
1403 				    it->pointer,
1404 				    it->val);
1405 			}
1406 		}
1407 	}
1408 	SCTP_INP_RUNLOCK(it->inp);
1409 no_stcb:
1410 	/* done with all assocs on this endpoint, move on to next endpoint */
1411 	it->done_current_ep = 0;
1412 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1413 		it->inp = NULL;
1414 	} else {
1415 		it->inp = LIST_NEXT(it->inp, sctp_list);
1416 	}
1417 	if (it->inp == NULL) {
1418 		goto done_with_iterator;
1419 	}
1420 	goto select_a_new_ep;
1421 }
1422 
1423 void
1424 sctp_iterator_worker(void)
1425 {
1426 	struct sctp_iterator *it, *nit;
1427 
1428 	/* This function is called with the WQ lock in place */
1429 
1430 	sctp_it_ctl.iterator_running = 1;
1431 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1432 		/* now lets work on this one */
1433 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1434 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1435 		CURVNET_SET(it->vn);
1436 		sctp_iterator_work(it);
1437 		CURVNET_RESTORE();
1438 		SCTP_IPI_ITERATOR_WQ_LOCK();
1439 		/* sa_ignore FREED_MEMORY */
1440 	}
1441 	sctp_it_ctl.iterator_running = 0;
1442 	return;
1443 }
1444 
1445 
1446 static void
1447 sctp_handle_addr_wq(void)
1448 {
1449 	/* deal with the ADDR wq from the rtsock calls */
1450 	struct sctp_laddr *wi, *nwi;
1451 	struct sctp_asconf_iterator *asc;
1452 
1453 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1454 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1455 	if (asc == NULL) {
1456 		/* Try later, no memory */
1457 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1458 		    (struct sctp_inpcb *)NULL,
1459 		    (struct sctp_tcb *)NULL,
1460 		    (struct sctp_nets *)NULL);
1461 		return;
1462 	}
1463 	LIST_INIT(&asc->list_of_work);
1464 	asc->cnt = 0;
1465 
1466 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1467 		LIST_REMOVE(wi, sctp_nxt_addr);
1468 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1469 		asc->cnt++;
1470 	}
1471 
1472 	if (asc->cnt == 0) {
1473 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1474 	} else {
1475 		int ret;
1476 
1477 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1478 		    sctp_asconf_iterator_stcb,
1479 		    NULL,	/* No ep end for boundall */
1480 		    SCTP_PCB_FLAGS_BOUNDALL,
1481 		    SCTP_PCB_ANY_FEATURES,
1482 		    SCTP_ASOC_ANY_STATE,
1483 		    (void *)asc, 0,
1484 		    sctp_asconf_iterator_end, NULL, 0);
1485 		if (ret) {
1486 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1487 			/*
1488 			 * Freeing if we are stopping or put back on the
1489 			 * addr_wq.
1490 			 */
1491 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1492 				sctp_asconf_iterator_end(asc, 0);
1493 			} else {
1494 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1495 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1496 				}
1497 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1498 			}
1499 		}
1500 	}
1501 }
1502 
1503 void
1504 sctp_timeout_handler(void *t)
1505 {
1506 	struct sctp_inpcb *inp;
1507 	struct sctp_tcb *stcb;
1508 	struct sctp_nets *net;
1509 	struct sctp_timer *tmr;
1510 	struct mbuf *op_err;
1511 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1512 	struct socket *so;
1513 #endif
1514 	int did_output;
1515 	int type;
1516 
1517 	tmr = (struct sctp_timer *)t;
1518 	inp = (struct sctp_inpcb *)tmr->ep;
1519 	stcb = (struct sctp_tcb *)tmr->tcb;
1520 	net = (struct sctp_nets *)tmr->net;
1521 	CURVNET_SET((struct vnet *)tmr->vnet);
1522 	did_output = 1;
1523 
1524 #ifdef SCTP_AUDITING_ENABLED
1525 	sctp_audit_log(0xF0, (uint8_t)tmr->type);
1526 	sctp_auditing(3, inp, stcb, net);
1527 #endif
1528 
1529 	/* sanity checks... */
1530 	if (tmr->self != (void *)tmr) {
1531 		/*
1532 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1533 		 * (void *)tmr);
1534 		 */
1535 		CURVNET_RESTORE();
1536 		return;
1537 	}
1538 	tmr->stopped_from = 0xa001;
1539 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1540 		/*
1541 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1542 		 * tmr->type);
1543 		 */
1544 		CURVNET_RESTORE();
1545 		return;
1546 	}
1547 	tmr->stopped_from = 0xa002;
1548 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1549 		CURVNET_RESTORE();
1550 		return;
1551 	}
1552 	/* if this is an iterator timeout, get the struct and clear inp */
1553 	tmr->stopped_from = 0xa003;
1554 	if (inp) {
1555 		SCTP_INP_INCR_REF(inp);
1556 		if ((inp->sctp_socket == NULL) &&
1557 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1558 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1559 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1560 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1561 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1562 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1563 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1564 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1565 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) {
1566 			SCTP_INP_DECR_REF(inp);
1567 			CURVNET_RESTORE();
1568 			return;
1569 		}
1570 	}
1571 	tmr->stopped_from = 0xa004;
1572 	if (stcb) {
1573 		atomic_add_int(&stcb->asoc.refcnt, 1);
1574 		if (stcb->asoc.state == 0) {
1575 			atomic_add_int(&stcb->asoc.refcnt, -1);
1576 			if (inp) {
1577 				SCTP_INP_DECR_REF(inp);
1578 			}
1579 			CURVNET_RESTORE();
1580 			return;
1581 		}
1582 	}
1583 	type = tmr->type;
1584 	tmr->stopped_from = 0xa005;
1585 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1586 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1587 		if (inp) {
1588 			SCTP_INP_DECR_REF(inp);
1589 		}
1590 		if (stcb) {
1591 			atomic_add_int(&stcb->asoc.refcnt, -1);
1592 		}
1593 		CURVNET_RESTORE();
1594 		return;
1595 	}
1596 	tmr->stopped_from = 0xa006;
1597 
1598 	if (stcb) {
1599 		SCTP_TCB_LOCK(stcb);
1600 		atomic_add_int(&stcb->asoc.refcnt, -1);
1601 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1602 		    ((stcb->asoc.state == 0) ||
1603 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1604 			SCTP_TCB_UNLOCK(stcb);
1605 			if (inp) {
1606 				SCTP_INP_DECR_REF(inp);
1607 			}
1608 			CURVNET_RESTORE();
1609 			return;
1610 		}
1611 	} else if (inp != NULL) {
1612 		if (type != SCTP_TIMER_TYPE_INPKILL) {
1613 			SCTP_INP_WLOCK(inp);
1614 		}
1615 	} else {
1616 		SCTP_WQ_ADDR_LOCK();
1617 	}
1618 	/* record in stopped what t-o occurred */
1619 	tmr->stopped_from = type;
1620 
1621 	/* mark as being serviced now */
1622 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1623 		/*
1624 		 * Callout has been rescheduled.
1625 		 */
1626 		goto get_out;
1627 	}
1628 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1629 		/*
1630 		 * Not active, so no action.
1631 		 */
1632 		goto get_out;
1633 	}
1634 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1635 
1636 	/* call the handler for the appropriate timer type */
1637 	switch (type) {
1638 	case SCTP_TIMER_TYPE_ADDR_WQ:
1639 		sctp_handle_addr_wq();
1640 		break;
1641 	case SCTP_TIMER_TYPE_SEND:
1642 		if ((stcb == NULL) || (inp == NULL)) {
1643 			break;
1644 		}
1645 		SCTP_STAT_INCR(sctps_timodata);
1646 		stcb->asoc.timodata++;
1647 		stcb->asoc.num_send_timers_up--;
1648 		if (stcb->asoc.num_send_timers_up < 0) {
1649 			stcb->asoc.num_send_timers_up = 0;
1650 		}
1651 		SCTP_TCB_LOCK_ASSERT(stcb);
1652 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1653 			/* no need to unlock on tcb its gone */
1654 
1655 			goto out_decr;
1656 		}
1657 		SCTP_TCB_LOCK_ASSERT(stcb);
1658 #ifdef SCTP_AUDITING_ENABLED
1659 		sctp_auditing(4, inp, stcb, net);
1660 #endif
1661 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1662 		if ((stcb->asoc.num_send_timers_up == 0) &&
1663 		    (stcb->asoc.sent_queue_cnt > 0)) {
1664 			struct sctp_tmit_chunk *chk;
1665 
1666 			/*
1667 			 * safeguard. If there on some on the sent queue
1668 			 * somewhere but no timers running something is
1669 			 * wrong... so we start a timer on the first chunk
1670 			 * on the send queue on whatever net it is sent to.
1671 			 */
1672 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1673 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1674 			    chk->whoTo);
1675 		}
1676 		break;
1677 	case SCTP_TIMER_TYPE_INIT:
1678 		if ((stcb == NULL) || (inp == NULL)) {
1679 			break;
1680 		}
1681 		SCTP_STAT_INCR(sctps_timoinit);
1682 		stcb->asoc.timoinit++;
1683 		if (sctp_t1init_timer(inp, stcb, net)) {
1684 			/* no need to unlock on tcb its gone */
1685 			goto out_decr;
1686 		}
1687 		/* We do output but not here */
1688 		did_output = 0;
1689 		break;
1690 	case SCTP_TIMER_TYPE_RECV:
1691 		if ((stcb == NULL) || (inp == NULL)) {
1692 			break;
1693 		}
1694 		SCTP_STAT_INCR(sctps_timosack);
1695 		stcb->asoc.timosack++;
1696 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1697 #ifdef SCTP_AUDITING_ENABLED
1698 		sctp_auditing(4, inp, stcb, net);
1699 #endif
1700 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1701 		break;
1702 	case SCTP_TIMER_TYPE_SHUTDOWN:
1703 		if ((stcb == NULL) || (inp == NULL)) {
1704 			break;
1705 		}
1706 		if (sctp_shutdown_timer(inp, stcb, net)) {
1707 			/* no need to unlock on tcb its gone */
1708 			goto out_decr;
1709 		}
1710 		SCTP_STAT_INCR(sctps_timoshutdown);
1711 		stcb->asoc.timoshutdown++;
1712 #ifdef SCTP_AUDITING_ENABLED
1713 		sctp_auditing(4, inp, stcb, net);
1714 #endif
1715 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1716 		break;
1717 	case SCTP_TIMER_TYPE_HEARTBEAT:
1718 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1719 			break;
1720 		}
1721 		SCTP_STAT_INCR(sctps_timoheartbeat);
1722 		stcb->asoc.timoheartbeat++;
1723 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1724 			/* no need to unlock on tcb its gone */
1725 			goto out_decr;
1726 		}
1727 #ifdef SCTP_AUDITING_ENABLED
1728 		sctp_auditing(4, inp, stcb, net);
1729 #endif
1730 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1731 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1732 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1733 		}
1734 		break;
1735 	case SCTP_TIMER_TYPE_COOKIE:
1736 		if ((stcb == NULL) || (inp == NULL)) {
1737 			break;
1738 		}
1739 		if (sctp_cookie_timer(inp, stcb, net)) {
1740 			/* no need to unlock on tcb its gone */
1741 			goto out_decr;
1742 		}
1743 		SCTP_STAT_INCR(sctps_timocookie);
1744 		stcb->asoc.timocookie++;
1745 #ifdef SCTP_AUDITING_ENABLED
1746 		sctp_auditing(4, inp, stcb, net);
1747 #endif
1748 		/*
1749 		 * We consider T3 and Cookie timer pretty much the same with
1750 		 * respect to where from in chunk_output.
1751 		 */
1752 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1753 		break;
1754 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1755 		{
1756 			struct timeval tv;
1757 			int i, secret;
1758 
1759 			if (inp == NULL) {
1760 				break;
1761 			}
1762 			SCTP_STAT_INCR(sctps_timosecret);
1763 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1764 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1765 			inp->sctp_ep.last_secret_number =
1766 			    inp->sctp_ep.current_secret_number;
1767 			inp->sctp_ep.current_secret_number++;
1768 			if (inp->sctp_ep.current_secret_number >=
1769 			    SCTP_HOW_MANY_SECRETS) {
1770 				inp->sctp_ep.current_secret_number = 0;
1771 			}
1772 			secret = (int)inp->sctp_ep.current_secret_number;
1773 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1774 				inp->sctp_ep.secret_key[secret][i] =
1775 				    sctp_select_initial_TSN(&inp->sctp_ep);
1776 			}
1777 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1778 		}
1779 		did_output = 0;
1780 		break;
1781 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1782 		if ((stcb == NULL) || (inp == NULL)) {
1783 			break;
1784 		}
1785 		SCTP_STAT_INCR(sctps_timopathmtu);
1786 		sctp_pathmtu_timer(inp, stcb, net);
1787 		did_output = 0;
1788 		break;
1789 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1790 		if ((stcb == NULL) || (inp == NULL)) {
1791 			break;
1792 		}
1793 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1794 			/* no need to unlock on tcb its gone */
1795 			goto out_decr;
1796 		}
1797 		SCTP_STAT_INCR(sctps_timoshutdownack);
1798 		stcb->asoc.timoshutdownack++;
1799 #ifdef SCTP_AUDITING_ENABLED
1800 		sctp_auditing(4, inp, stcb, net);
1801 #endif
1802 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1803 		break;
1804 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1805 		if ((stcb == NULL) || (inp == NULL)) {
1806 			break;
1807 		}
1808 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1809 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1810 		    "Shutdown guard timer expired");
1811 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1812 		/* no need to unlock on tcb its gone */
1813 		goto out_decr;
1814 
1815 	case SCTP_TIMER_TYPE_STRRESET:
1816 		if ((stcb == NULL) || (inp == NULL)) {
1817 			break;
1818 		}
1819 		if (sctp_strreset_timer(inp, stcb, net)) {
1820 			/* no need to unlock on tcb its gone */
1821 			goto out_decr;
1822 		}
1823 		SCTP_STAT_INCR(sctps_timostrmrst);
1824 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1825 		break;
1826 	case SCTP_TIMER_TYPE_ASCONF:
1827 		if ((stcb == NULL) || (inp == NULL)) {
1828 			break;
1829 		}
1830 		if (sctp_asconf_timer(inp, stcb, net)) {
1831 			/* no need to unlock on tcb its gone */
1832 			goto out_decr;
1833 		}
1834 		SCTP_STAT_INCR(sctps_timoasconf);
1835 #ifdef SCTP_AUDITING_ENABLED
1836 		sctp_auditing(4, inp, stcb, net);
1837 #endif
1838 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1839 		break;
1840 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1841 		if ((stcb == NULL) || (inp == NULL)) {
1842 			break;
1843 		}
1844 		sctp_delete_prim_timer(inp, stcb, net);
1845 		SCTP_STAT_INCR(sctps_timodelprim);
1846 		break;
1847 
1848 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1849 		if ((stcb == NULL) || (inp == NULL)) {
1850 			break;
1851 		}
1852 		SCTP_STAT_INCR(sctps_timoautoclose);
1853 		sctp_autoclose_timer(inp, stcb, net);
1854 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1855 		did_output = 0;
1856 		break;
1857 	case SCTP_TIMER_TYPE_ASOCKILL:
1858 		if ((stcb == NULL) || (inp == NULL)) {
1859 			break;
1860 		}
1861 		SCTP_STAT_INCR(sctps_timoassockill);
1862 		/* Can we free it yet? */
1863 		SCTP_INP_DECR_REF(inp);
1864 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1865 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1866 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1867 		so = SCTP_INP_SO(inp);
1868 		atomic_add_int(&stcb->asoc.refcnt, 1);
1869 		SCTP_TCB_UNLOCK(stcb);
1870 		SCTP_SOCKET_LOCK(so, 1);
1871 		SCTP_TCB_LOCK(stcb);
1872 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1873 #endif
1874 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1875 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1876 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1877 		SCTP_SOCKET_UNLOCK(so, 1);
1878 #endif
1879 		/*
1880 		 * free asoc, always unlocks (or destroy's) so prevent
1881 		 * duplicate unlock or unlock of a free mtx :-0
1882 		 */
1883 		stcb = NULL;
1884 		goto out_no_decr;
1885 	case SCTP_TIMER_TYPE_INPKILL:
1886 		SCTP_STAT_INCR(sctps_timoinpkill);
1887 		if (inp == NULL) {
1888 			break;
1889 		}
1890 		/*
1891 		 * special case, take away our increment since WE are the
1892 		 * killer
1893 		 */
1894 		SCTP_INP_DECR_REF(inp);
1895 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1896 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1897 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1898 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1899 		inp = NULL;
1900 		goto out_no_decr;
1901 	default:
1902 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1903 		    type);
1904 		break;
1905 	}
1906 #ifdef SCTP_AUDITING_ENABLED
1907 	sctp_audit_log(0xF1, (uint8_t)type);
1908 	if (inp)
1909 		sctp_auditing(5, inp, stcb, net);
1910 #endif
1911 	if ((did_output) && stcb) {
1912 		/*
1913 		 * Now we need to clean up the control chunk chain if an
1914 		 * ECNE is on it. It must be marked as UNSENT again so next
1915 		 * call will continue to send it until such time that we get
1916 		 * a CWR, to remove it. It is, however, less likely that we
1917 		 * will find a ecn echo on the chain though.
1918 		 */
1919 		sctp_fix_ecn_echo(&stcb->asoc);
1920 	}
1921 get_out:
1922 	if (stcb) {
1923 		SCTP_TCB_UNLOCK(stcb);
1924 	} else if (inp != NULL) {
1925 		SCTP_INP_WUNLOCK(inp);
1926 	} else {
1927 		SCTP_WQ_ADDR_UNLOCK();
1928 	}
1929 
1930 out_decr:
1931 	if (inp) {
1932 		SCTP_INP_DECR_REF(inp);
1933 	}
1934 out_no_decr:
1935 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
1936 	CURVNET_RESTORE();
1937 }
1938 
1939 void
1940 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1941     struct sctp_nets *net)
1942 {
1943 	uint32_t to_ticks;
1944 	struct sctp_timer *tmr;
1945 
1946 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1947 		return;
1948 
1949 	tmr = NULL;
1950 	if (stcb) {
1951 		SCTP_TCB_LOCK_ASSERT(stcb);
1952 	}
1953 	switch (t_type) {
1954 	case SCTP_TIMER_TYPE_ADDR_WQ:
1955 		/* Only 1 tick away :-) */
1956 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1957 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1958 		break;
1959 	case SCTP_TIMER_TYPE_SEND:
1960 		/* Here we use the RTO timer */
1961 		{
1962 			int rto_val;
1963 
1964 			if ((stcb == NULL) || (net == NULL)) {
1965 				return;
1966 			}
1967 			tmr = &net->rxt_timer;
1968 			if (net->RTO == 0) {
1969 				rto_val = stcb->asoc.initial_rto;
1970 			} else {
1971 				rto_val = net->RTO;
1972 			}
1973 			to_ticks = MSEC_TO_TICKS(rto_val);
1974 		}
1975 		break;
1976 	case SCTP_TIMER_TYPE_INIT:
1977 		/*
1978 		 * Here we use the INIT timer default usually about 1
1979 		 * minute.
1980 		 */
1981 		if ((stcb == NULL) || (net == NULL)) {
1982 			return;
1983 		}
1984 		tmr = &net->rxt_timer;
1985 		if (net->RTO == 0) {
1986 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1987 		} else {
1988 			to_ticks = MSEC_TO_TICKS(net->RTO);
1989 		}
1990 		break;
1991 	case SCTP_TIMER_TYPE_RECV:
1992 		/*
1993 		 * Here we use the Delayed-Ack timer value from the inp
1994 		 * ususually about 200ms.
1995 		 */
1996 		if (stcb == NULL) {
1997 			return;
1998 		}
1999 		tmr = &stcb->asoc.dack_timer;
2000 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2001 		break;
2002 	case SCTP_TIMER_TYPE_SHUTDOWN:
2003 		/* Here we use the RTO of the destination. */
2004 		if ((stcb == NULL) || (net == NULL)) {
2005 			return;
2006 		}
2007 		if (net->RTO == 0) {
2008 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2009 		} else {
2010 			to_ticks = MSEC_TO_TICKS(net->RTO);
2011 		}
2012 		tmr = &net->rxt_timer;
2013 		break;
2014 	case SCTP_TIMER_TYPE_HEARTBEAT:
2015 		/*
2016 		 * the net is used here so that we can add in the RTO. Even
2017 		 * though we use a different timer. We also add the HB timer
2018 		 * PLUS a random jitter.
2019 		 */
2020 		if ((stcb == NULL) || (net == NULL)) {
2021 			return;
2022 		} else {
2023 			uint32_t rndval;
2024 			uint32_t jitter;
2025 
2026 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2027 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2028 				return;
2029 			}
2030 			if (net->RTO == 0) {
2031 				to_ticks = stcb->asoc.initial_rto;
2032 			} else {
2033 				to_ticks = net->RTO;
2034 			}
2035 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2036 			jitter = rndval % to_ticks;
2037 			if (jitter >= (to_ticks >> 1)) {
2038 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2039 			} else {
2040 				to_ticks = to_ticks - jitter;
2041 			}
2042 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2043 			    !(net->dest_state & SCTP_ADDR_PF)) {
2044 				to_ticks += net->heart_beat_delay;
2045 			}
2046 			/*
2047 			 * Now we must convert the to_ticks that are now in
2048 			 * ms to ticks.
2049 			 */
2050 			to_ticks = MSEC_TO_TICKS(to_ticks);
2051 			tmr = &net->hb_timer;
2052 		}
2053 		break;
2054 	case SCTP_TIMER_TYPE_COOKIE:
2055 		/*
2056 		 * Here we can use the RTO timer from the network since one
2057 		 * RTT was compelete. If a retran happened then we will be
2058 		 * using the RTO initial value.
2059 		 */
2060 		if ((stcb == NULL) || (net == NULL)) {
2061 			return;
2062 		}
2063 		if (net->RTO == 0) {
2064 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2065 		} else {
2066 			to_ticks = MSEC_TO_TICKS(net->RTO);
2067 		}
2068 		tmr = &net->rxt_timer;
2069 		break;
2070 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2071 		/*
2072 		 * nothing needed but the endpoint here ususually about 60
2073 		 * minutes.
2074 		 */
2075 		tmr = &inp->sctp_ep.signature_change;
2076 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2077 		break;
2078 	case SCTP_TIMER_TYPE_ASOCKILL:
2079 		if (stcb == NULL) {
2080 			return;
2081 		}
2082 		tmr = &stcb->asoc.strreset_timer;
2083 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2084 		break;
2085 	case SCTP_TIMER_TYPE_INPKILL:
2086 		/*
2087 		 * The inp is setup to die. We re-use the signature_chage
2088 		 * timer since that has stopped and we are in the GONE
2089 		 * state.
2090 		 */
2091 		tmr = &inp->sctp_ep.signature_change;
2092 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2093 		break;
2094 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2095 		/*
2096 		 * Here we use the value found in the EP for PMTU ususually
2097 		 * about 10 minutes.
2098 		 */
2099 		if ((stcb == NULL) || (net == NULL)) {
2100 			return;
2101 		}
2102 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2103 			return;
2104 		}
2105 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2106 		tmr = &net->pmtu_timer;
2107 		break;
2108 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2109 		/* Here we use the RTO of the destination */
2110 		if ((stcb == NULL) || (net == NULL)) {
2111 			return;
2112 		}
2113 		if (net->RTO == 0) {
2114 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2115 		} else {
2116 			to_ticks = MSEC_TO_TICKS(net->RTO);
2117 		}
2118 		tmr = &net->rxt_timer;
2119 		break;
2120 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2121 		/*
2122 		 * Here we use the endpoints shutdown guard timer usually
2123 		 * about 3 minutes.
2124 		 */
2125 		if (stcb == NULL) {
2126 			return;
2127 		}
2128 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2129 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2130 		} else {
2131 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2132 		}
2133 		tmr = &stcb->asoc.shut_guard_timer;
2134 		break;
2135 	case SCTP_TIMER_TYPE_STRRESET:
2136 		/*
2137 		 * Here the timer comes from the stcb but its value is from
2138 		 * the net's RTO.
2139 		 */
2140 		if ((stcb == NULL) || (net == NULL)) {
2141 			return;
2142 		}
2143 		if (net->RTO == 0) {
2144 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2145 		} else {
2146 			to_ticks = MSEC_TO_TICKS(net->RTO);
2147 		}
2148 		tmr = &stcb->asoc.strreset_timer;
2149 		break;
2150 	case SCTP_TIMER_TYPE_ASCONF:
2151 		/*
2152 		 * Here the timer comes from the stcb but its value is from
2153 		 * the net's RTO.
2154 		 */
2155 		if ((stcb == NULL) || (net == NULL)) {
2156 			return;
2157 		}
2158 		if (net->RTO == 0) {
2159 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2160 		} else {
2161 			to_ticks = MSEC_TO_TICKS(net->RTO);
2162 		}
2163 		tmr = &stcb->asoc.asconf_timer;
2164 		break;
2165 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2166 		if ((stcb == NULL) || (net != NULL)) {
2167 			return;
2168 		}
2169 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2170 		tmr = &stcb->asoc.delete_prim_timer;
2171 		break;
2172 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2173 		if (stcb == NULL) {
2174 			return;
2175 		}
2176 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2177 			/*
2178 			 * Really an error since stcb is NOT set to
2179 			 * autoclose
2180 			 */
2181 			return;
2182 		}
2183 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2184 		tmr = &stcb->asoc.autoclose_timer;
2185 		break;
2186 	default:
2187 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2188 		    __func__, t_type);
2189 		return;
2190 		break;
2191 	}
2192 	if ((to_ticks <= 0) || (tmr == NULL)) {
2193 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2194 		    __func__, t_type, to_ticks, (void *)tmr);
2195 		return;
2196 	}
2197 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2198 		/*
2199 		 * we do NOT allow you to have it already running. if it is
2200 		 * we leave the current one up unchanged
2201 		 */
2202 		return;
2203 	}
2204 	/* At this point we can proceed */
2205 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2206 		stcb->asoc.num_send_timers_up++;
2207 	}
2208 	tmr->stopped_from = 0;
2209 	tmr->type = t_type;
2210 	tmr->ep = (void *)inp;
2211 	tmr->tcb = (void *)stcb;
2212 	tmr->net = (void *)net;
2213 	tmr->self = (void *)tmr;
2214 	tmr->vnet = (void *)curvnet;
2215 	tmr->ticks = sctp_get_tick_count();
2216 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2217 	return;
2218 }
2219 
2220 void
2221 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2222     struct sctp_nets *net, uint32_t from)
2223 {
2224 	struct sctp_timer *tmr;
2225 
2226 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2227 	    (inp == NULL))
2228 		return;
2229 
2230 	tmr = NULL;
2231 	if (stcb) {
2232 		SCTP_TCB_LOCK_ASSERT(stcb);
2233 	}
2234 	switch (t_type) {
2235 	case SCTP_TIMER_TYPE_ADDR_WQ:
2236 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2237 		break;
2238 	case SCTP_TIMER_TYPE_SEND:
2239 		if ((stcb == NULL) || (net == NULL)) {
2240 			return;
2241 		}
2242 		tmr = &net->rxt_timer;
2243 		break;
2244 	case SCTP_TIMER_TYPE_INIT:
2245 		if ((stcb == NULL) || (net == NULL)) {
2246 			return;
2247 		}
2248 		tmr = &net->rxt_timer;
2249 		break;
2250 	case SCTP_TIMER_TYPE_RECV:
2251 		if (stcb == NULL) {
2252 			return;
2253 		}
2254 		tmr = &stcb->asoc.dack_timer;
2255 		break;
2256 	case SCTP_TIMER_TYPE_SHUTDOWN:
2257 		if ((stcb == NULL) || (net == NULL)) {
2258 			return;
2259 		}
2260 		tmr = &net->rxt_timer;
2261 		break;
2262 	case SCTP_TIMER_TYPE_HEARTBEAT:
2263 		if ((stcb == NULL) || (net == NULL)) {
2264 			return;
2265 		}
2266 		tmr = &net->hb_timer;
2267 		break;
2268 	case SCTP_TIMER_TYPE_COOKIE:
2269 		if ((stcb == NULL) || (net == NULL)) {
2270 			return;
2271 		}
2272 		tmr = &net->rxt_timer;
2273 		break;
2274 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2275 		/* nothing needed but the endpoint here */
2276 		tmr = &inp->sctp_ep.signature_change;
2277 		/*
2278 		 * We re-use the newcookie timer for the INP kill timer. We
2279 		 * must assure that we do not kill it by accident.
2280 		 */
2281 		break;
2282 	case SCTP_TIMER_TYPE_ASOCKILL:
2283 		/*
2284 		 * Stop the asoc kill timer.
2285 		 */
2286 		if (stcb == NULL) {
2287 			return;
2288 		}
2289 		tmr = &stcb->asoc.strreset_timer;
2290 		break;
2291 
2292 	case SCTP_TIMER_TYPE_INPKILL:
2293 		/*
2294 		 * The inp is setup to die. We re-use the signature_chage
2295 		 * timer since that has stopped and we are in the GONE
2296 		 * state.
2297 		 */
2298 		tmr = &inp->sctp_ep.signature_change;
2299 		break;
2300 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2301 		if ((stcb == NULL) || (net == NULL)) {
2302 			return;
2303 		}
2304 		tmr = &net->pmtu_timer;
2305 		break;
2306 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2307 		if ((stcb == NULL) || (net == NULL)) {
2308 			return;
2309 		}
2310 		tmr = &net->rxt_timer;
2311 		break;
2312 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2313 		if (stcb == NULL) {
2314 			return;
2315 		}
2316 		tmr = &stcb->asoc.shut_guard_timer;
2317 		break;
2318 	case SCTP_TIMER_TYPE_STRRESET:
2319 		if (stcb == NULL) {
2320 			return;
2321 		}
2322 		tmr = &stcb->asoc.strreset_timer;
2323 		break;
2324 	case SCTP_TIMER_TYPE_ASCONF:
2325 		if (stcb == NULL) {
2326 			return;
2327 		}
2328 		tmr = &stcb->asoc.asconf_timer;
2329 		break;
2330 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2331 		if (stcb == NULL) {
2332 			return;
2333 		}
2334 		tmr = &stcb->asoc.delete_prim_timer;
2335 		break;
2336 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2337 		if (stcb == NULL) {
2338 			return;
2339 		}
2340 		tmr = &stcb->asoc.autoclose_timer;
2341 		break;
2342 	default:
2343 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2344 		    __func__, t_type);
2345 		break;
2346 	}
2347 	if (tmr == NULL) {
2348 		return;
2349 	}
2350 	if ((tmr->type != t_type) && tmr->type) {
2351 		/*
2352 		 * Ok we have a timer that is under joint use. Cookie timer
2353 		 * per chance with the SEND timer. We therefore are NOT
2354 		 * running the timer that the caller wants stopped.  So just
2355 		 * return.
2356 		 */
2357 		return;
2358 	}
2359 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2360 		stcb->asoc.num_send_timers_up--;
2361 		if (stcb->asoc.num_send_timers_up < 0) {
2362 			stcb->asoc.num_send_timers_up = 0;
2363 		}
2364 	}
2365 	tmr->self = NULL;
2366 	tmr->stopped_from = from;
2367 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2368 	return;
2369 }
2370 
2371 uint32_t
2372 sctp_calculate_len(struct mbuf *m)
2373 {
2374 	uint32_t tlen = 0;
2375 	struct mbuf *at;
2376 
2377 	at = m;
2378 	while (at) {
2379 		tlen += SCTP_BUF_LEN(at);
2380 		at = SCTP_BUF_NEXT(at);
2381 	}
2382 	return (tlen);
2383 }
2384 
2385 void
2386 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2387     struct sctp_association *asoc, uint32_t mtu)
2388 {
2389 	/*
2390 	 * Reset the P-MTU size on this association, this involves changing
2391 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2392 	 * allow the DF flag to be cleared.
2393 	 */
2394 	struct sctp_tmit_chunk *chk;
2395 	unsigned int eff_mtu, ovh;
2396 
2397 	asoc->smallest_mtu = mtu;
2398 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2399 		ovh = SCTP_MIN_OVERHEAD;
2400 	} else {
2401 		ovh = SCTP_MIN_V4_OVERHEAD;
2402 	}
2403 	eff_mtu = mtu - ovh;
2404 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2405 		if (chk->send_size > eff_mtu) {
2406 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2407 		}
2408 	}
2409 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2410 		if (chk->send_size > eff_mtu) {
2411 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2412 		}
2413 	}
2414 }
2415 
2416 
2417 /*
2418  * given an association and starting time of the current RTT period return
2419  * RTO in number of msecs net should point to the current network
2420  */
2421 
2422 uint32_t
2423 sctp_calculate_rto(struct sctp_tcb *stcb,
2424     struct sctp_association *asoc,
2425     struct sctp_nets *net,
2426     struct timeval *old,
2427     int rtt_from_sack)
2428 {
2429 	/*-
2430 	 * given an association and the starting time of the current RTT
2431 	 * period (in value1/value2) return RTO in number of msecs.
2432 	 */
2433 	int32_t rtt;		/* RTT in ms */
2434 	uint32_t new_rto;
2435 	int first_measure = 0;
2436 	struct timeval now;
2437 
2438 	/************************/
2439 	/* 1. calculate new RTT */
2440 	/************************/
2441 	/* get the current time */
2442 	if (stcb->asoc.use_precise_time) {
2443 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2444 	} else {
2445 		(void)SCTP_GETTIME_TIMEVAL(&now);
2446 	}
2447 	timevalsub(&now, old);
2448 	/* store the current RTT in us */
2449 	net->rtt = (uint64_t)1000000 *(uint64_t)now.tv_sec +
2450 	        (uint64_t)now.tv_usec;
2451 
2452 	/* compute rtt in ms */
2453 	rtt = (int32_t)(net->rtt / 1000);
2454 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2455 		/*
2456 		 * Tell the CC module that a new update has just occurred
2457 		 * from a sack
2458 		 */
2459 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2460 	}
2461 	/*
2462 	 * Do we need to determine the lan? We do this only on sacks i.e.
2463 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2464 	 */
2465 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2466 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2467 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2468 			net->lan_type = SCTP_LAN_INTERNET;
2469 		} else {
2470 			net->lan_type = SCTP_LAN_LOCAL;
2471 		}
2472 	}
2473 	/***************************/
2474 	/* 2. update RTTVAR & SRTT */
2475 	/***************************/
2476 	/*-
2477 	 * Compute the scaled average lastsa and the
2478 	 * scaled variance lastsv as described in van Jacobson
2479 	 * Paper "Congestion Avoidance and Control", Annex A.
2480 	 *
2481 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2482 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2483 	 */
2484 	if (net->RTO_measured) {
2485 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2486 		net->lastsa += rtt;
2487 		if (rtt < 0) {
2488 			rtt = -rtt;
2489 		}
2490 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2491 		net->lastsv += rtt;
2492 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2493 			rto_logging(net, SCTP_LOG_RTTVAR);
2494 		}
2495 	} else {
2496 		/* First RTO measurment */
2497 		net->RTO_measured = 1;
2498 		first_measure = 1;
2499 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2500 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2501 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2502 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2503 		}
2504 	}
2505 	if (net->lastsv == 0) {
2506 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2507 	}
2508 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2509 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2510 	    (stcb->asoc.sat_network_lockout == 0)) {
2511 		stcb->asoc.sat_network = 1;
2512 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2513 		stcb->asoc.sat_network = 0;
2514 		stcb->asoc.sat_network_lockout = 1;
2515 	}
2516 	/* bound it, per C6/C7 in Section 5.3.1 */
2517 	if (new_rto < stcb->asoc.minrto) {
2518 		new_rto = stcb->asoc.minrto;
2519 	}
2520 	if (new_rto > stcb->asoc.maxrto) {
2521 		new_rto = stcb->asoc.maxrto;
2522 	}
2523 	/* we are now returning the RTO */
2524 	return (new_rto);
2525 }
2526 
2527 /*
2528  * return a pointer to a contiguous piece of data from the given mbuf chain
2529  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2530  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2531  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2532  */
2533 caddr_t
2534 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2535 {
2536 	uint32_t count;
2537 	uint8_t *ptr;
2538 
2539 	ptr = in_ptr;
2540 	if ((off < 0) || (len <= 0))
2541 		return (NULL);
2542 
2543 	/* find the desired start location */
2544 	while ((m != NULL) && (off > 0)) {
2545 		if (off < SCTP_BUF_LEN(m))
2546 			break;
2547 		off -= SCTP_BUF_LEN(m);
2548 		m = SCTP_BUF_NEXT(m);
2549 	}
2550 	if (m == NULL)
2551 		return (NULL);
2552 
2553 	/* is the current mbuf large enough (eg. contiguous)? */
2554 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2555 		return (mtod(m, caddr_t)+off);
2556 	} else {
2557 		/* else, it spans more than one mbuf, so save a temp copy... */
2558 		while ((m != NULL) && (len > 0)) {
2559 			count = min(SCTP_BUF_LEN(m) - off, len);
2560 			memcpy(ptr, mtod(m, caddr_t)+off, count);
2561 			len -= count;
2562 			ptr += count;
2563 			off = 0;
2564 			m = SCTP_BUF_NEXT(m);
2565 		}
2566 		if ((m == NULL) && (len > 0))
2567 			return (NULL);
2568 		else
2569 			return ((caddr_t)in_ptr);
2570 	}
2571 }
2572 
2573 
2574 
2575 struct sctp_paramhdr *
2576 sctp_get_next_param(struct mbuf *m,
2577     int offset,
2578     struct sctp_paramhdr *pull,
2579     int pull_limit)
2580 {
2581 	/* This just provides a typed signature to Peter's Pull routine */
2582 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2583 	    (uint8_t *)pull));
2584 }
2585 
2586 
2587 struct mbuf *
2588 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2589 {
2590 	struct mbuf *m_last;
2591 	caddr_t dp;
2592 
2593 	if (padlen > 3) {
2594 		return (NULL);
2595 	}
2596 	if (padlen <= M_TRAILINGSPACE(m)) {
2597 		/*
2598 		 * The easy way. We hope the majority of the time we hit
2599 		 * here :)
2600 		 */
2601 		m_last = m;
2602 	} else {
2603 		/* Hard way we must grow the mbuf chain */
2604 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2605 		if (m_last == NULL) {
2606 			return (NULL);
2607 		}
2608 		SCTP_BUF_LEN(m_last) = 0;
2609 		SCTP_BUF_NEXT(m_last) = NULL;
2610 		SCTP_BUF_NEXT(m) = m_last;
2611 	}
2612 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2613 	SCTP_BUF_LEN(m_last) += padlen;
2614 	memset(dp, 0, padlen);
2615 	return (m_last);
2616 }
2617 
2618 struct mbuf *
2619 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2620 {
2621 	/* find the last mbuf in chain and pad it */
2622 	struct mbuf *m_at;
2623 
2624 	if (last_mbuf != NULL) {
2625 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2626 	} else {
2627 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2628 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2629 				return (sctp_add_pad_tombuf(m_at, padval));
2630 			}
2631 		}
2632 	}
2633 	return (NULL);
2634 }
2635 
2636 static void
2637 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2638     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2639 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2640     SCTP_UNUSED
2641 #endif
2642 )
2643 {
2644 	struct mbuf *m_notify;
2645 	struct sctp_assoc_change *sac;
2646 	struct sctp_queued_to_read *control;
2647 	unsigned int notif_len;
2648 	uint16_t abort_len;
2649 	unsigned int i;
2650 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2651 	struct socket *so;
2652 #endif
2653 
2654 	if (stcb == NULL) {
2655 		return;
2656 	}
2657 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2658 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2659 		if (abort != NULL) {
2660 			abort_len = ntohs(abort->ch.chunk_length);
2661 		} else {
2662 			abort_len = 0;
2663 		}
2664 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2665 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2666 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2667 			notif_len += abort_len;
2668 		}
2669 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2670 		if (m_notify == NULL) {
2671 			/* Retry with smaller value. */
2672 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2673 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2674 			if (m_notify == NULL) {
2675 				goto set_error;
2676 			}
2677 		}
2678 		SCTP_BUF_NEXT(m_notify) = NULL;
2679 		sac = mtod(m_notify, struct sctp_assoc_change *);
2680 		memset(sac, 0, notif_len);
2681 		sac->sac_type = SCTP_ASSOC_CHANGE;
2682 		sac->sac_flags = 0;
2683 		sac->sac_length = sizeof(struct sctp_assoc_change);
2684 		sac->sac_state = state;
2685 		sac->sac_error = error;
2686 		/* XXX verify these stream counts */
2687 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2688 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2689 		sac->sac_assoc_id = sctp_get_associd(stcb);
2690 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2691 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2692 				i = 0;
2693 				if (stcb->asoc.prsctp_supported == 1) {
2694 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2695 				}
2696 				if (stcb->asoc.auth_supported == 1) {
2697 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2698 				}
2699 				if (stcb->asoc.asconf_supported == 1) {
2700 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2701 				}
2702 				if (stcb->asoc.idata_supported == 1) {
2703 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2704 				}
2705 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2706 				if (stcb->asoc.reconfig_supported == 1) {
2707 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2708 				}
2709 				sac->sac_length += i;
2710 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2711 				memcpy(sac->sac_info, abort, abort_len);
2712 				sac->sac_length += abort_len;
2713 			}
2714 		}
2715 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2716 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2717 		    0, 0, stcb->asoc.context, 0, 0, 0,
2718 		    m_notify);
2719 		if (control != NULL) {
2720 			control->length = SCTP_BUF_LEN(m_notify);
2721 			control->spec_flags = M_NOTIFICATION;
2722 			/* not that we need this */
2723 			control->tail_mbuf = m_notify;
2724 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2725 			    control,
2726 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2727 			    so_locked);
2728 		} else {
2729 			sctp_m_freem(m_notify);
2730 		}
2731 	}
2732 	/*
2733 	 * For 1-to-1 style sockets, we send up and error when an ABORT
2734 	 * comes in.
2735 	 */
2736 set_error:
2737 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2738 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2739 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2740 		SOCK_LOCK(stcb->sctp_socket);
2741 		if (from_peer) {
2742 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2743 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2744 				stcb->sctp_socket->so_error = ECONNREFUSED;
2745 			} else {
2746 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2747 				stcb->sctp_socket->so_error = ECONNRESET;
2748 			}
2749 		} else {
2750 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2751 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2752 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2753 				stcb->sctp_socket->so_error = ETIMEDOUT;
2754 			} else {
2755 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2756 				stcb->sctp_socket->so_error = ECONNABORTED;
2757 			}
2758 		}
2759 		SOCK_UNLOCK(stcb->sctp_socket);
2760 	}
2761 	/* Wake ANY sleepers */
2762 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2763 	so = SCTP_INP_SO(stcb->sctp_ep);
2764 	if (!so_locked) {
2765 		atomic_add_int(&stcb->asoc.refcnt, 1);
2766 		SCTP_TCB_UNLOCK(stcb);
2767 		SCTP_SOCKET_LOCK(so, 1);
2768 		SCTP_TCB_LOCK(stcb);
2769 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2770 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2771 			SCTP_SOCKET_UNLOCK(so, 1);
2772 			return;
2773 		}
2774 	}
2775 #endif
2776 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2777 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2778 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2779 		socantrcvmore(stcb->sctp_socket);
2780 	}
2781 	sorwakeup(stcb->sctp_socket);
2782 	sowwakeup(stcb->sctp_socket);
2783 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2784 	if (!so_locked) {
2785 		SCTP_SOCKET_UNLOCK(so, 1);
2786 	}
2787 #endif
2788 }
2789 
2790 static void
2791 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2792     struct sockaddr *sa, uint32_t error, int so_locked
2793 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2794     SCTP_UNUSED
2795 #endif
2796 )
2797 {
2798 	struct mbuf *m_notify;
2799 	struct sctp_paddr_change *spc;
2800 	struct sctp_queued_to_read *control;
2801 
2802 	if ((stcb == NULL) ||
2803 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2804 		/* event not enabled */
2805 		return;
2806 	}
2807 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2808 	if (m_notify == NULL)
2809 		return;
2810 	SCTP_BUF_LEN(m_notify) = 0;
2811 	spc = mtod(m_notify, struct sctp_paddr_change *);
2812 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2813 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2814 	spc->spc_flags = 0;
2815 	spc->spc_length = sizeof(struct sctp_paddr_change);
2816 	switch (sa->sa_family) {
2817 #ifdef INET
2818 	case AF_INET:
2819 #ifdef INET6
2820 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2821 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2822 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2823 		} else {
2824 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2825 		}
2826 #else
2827 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2828 #endif
2829 		break;
2830 #endif
2831 #ifdef INET6
2832 	case AF_INET6:
2833 		{
2834 			struct sockaddr_in6 *sin6;
2835 
2836 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2837 
2838 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2839 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2840 				if (sin6->sin6_scope_id == 0) {
2841 					/* recover scope_id for user */
2842 					(void)sa6_recoverscope(sin6);
2843 				} else {
2844 					/* clear embedded scope_id for user */
2845 					in6_clearscope(&sin6->sin6_addr);
2846 				}
2847 			}
2848 			break;
2849 		}
2850 #endif
2851 	default:
2852 		/* TSNH */
2853 		break;
2854 	}
2855 	spc->spc_state = state;
2856 	spc->spc_error = error;
2857 	spc->spc_assoc_id = sctp_get_associd(stcb);
2858 
2859 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2860 	SCTP_BUF_NEXT(m_notify) = NULL;
2861 
2862 	/* append to socket */
2863 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2864 	    0, 0, stcb->asoc.context, 0, 0, 0,
2865 	    m_notify);
2866 	if (control == NULL) {
2867 		/* no memory */
2868 		sctp_m_freem(m_notify);
2869 		return;
2870 	}
2871 	control->length = SCTP_BUF_LEN(m_notify);
2872 	control->spec_flags = M_NOTIFICATION;
2873 	/* not that we need this */
2874 	control->tail_mbuf = m_notify;
2875 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2876 	    control,
2877 	    &stcb->sctp_socket->so_rcv, 1,
2878 	    SCTP_READ_LOCK_NOT_HELD,
2879 	    so_locked);
2880 }
2881 
2882 
2883 static void
2884 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2885     struct sctp_tmit_chunk *chk, int so_locked
2886 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2887     SCTP_UNUSED
2888 #endif
2889 )
2890 {
2891 	struct mbuf *m_notify;
2892 	struct sctp_send_failed *ssf;
2893 	struct sctp_send_failed_event *ssfe;
2894 	struct sctp_queued_to_read *control;
2895 	struct sctp_chunkhdr *chkhdr;
2896 	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
2897 
2898 	if ((stcb == NULL) ||
2899 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2900 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2901 		/* event not enabled */
2902 		return;
2903 	}
2904 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2905 		notifhdr_len = sizeof(struct sctp_send_failed_event);
2906 	} else {
2907 		notifhdr_len = sizeof(struct sctp_send_failed);
2908 	}
2909 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
2910 	if (m_notify == NULL)
2911 		/* no space left */
2912 		return;
2913 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
2914 	if (stcb->asoc.idata_supported) {
2915 		chkhdr_len = sizeof(struct sctp_idata_chunk);
2916 	} else {
2917 		chkhdr_len = sizeof(struct sctp_data_chunk);
2918 	}
2919 	/* Use some defaults in case we can't access the chunk header */
2920 	if (chk->send_size >= chkhdr_len) {
2921 		payload_len = chk->send_size - chkhdr_len;
2922 	} else {
2923 		payload_len = 0;
2924 	}
2925 	padding_len = 0;
2926 	if (chk->data != NULL) {
2927 		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
2928 		if (chkhdr != NULL) {
2929 			chk_len = ntohs(chkhdr->chunk_length);
2930 			if ((chk_len >= chkhdr_len) &&
2931 			    (chk->send_size >= chk_len) &&
2932 			    (chk->send_size - chk_len < 4)) {
2933 				padding_len = chk->send_size - chk_len;
2934 				payload_len = chk->send_size - chkhdr_len - padding_len;
2935 			}
2936 		}
2937 	}
2938 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2939 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2940 		memset(ssfe, 0, notifhdr_len);
2941 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2942 		if (sent) {
2943 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2944 		} else {
2945 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2946 		}
2947 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
2948 		ssfe->ssfe_error = error;
2949 		/* not exactly what the user sent in, but should be close :) */
2950 		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
2951 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2952 		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
2953 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2954 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2955 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2956 	} else {
2957 		ssf = mtod(m_notify, struct sctp_send_failed *);
2958 		memset(ssf, 0, notifhdr_len);
2959 		ssf->ssf_type = SCTP_SEND_FAILED;
2960 		if (sent) {
2961 			ssf->ssf_flags = SCTP_DATA_SENT;
2962 		} else {
2963 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2964 		}
2965 		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
2966 		ssf->ssf_error = error;
2967 		/* not exactly what the user sent in, but should be close :) */
2968 		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
2969 		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
2970 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2971 		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
2972 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
2973 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2974 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
2975 	}
2976 	if (chk->data != NULL) {
2977 		/* Trim off the sctp chunk header (it should be there) */
2978 		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
2979 			m_adj(chk->data, chkhdr_len);
2980 			m_adj(chk->data, -padding_len);
2981 			sctp_mbuf_crush(chk->data);
2982 			chk->send_size -= (chkhdr_len + padding_len);
2983 		}
2984 	}
2985 	SCTP_BUF_NEXT(m_notify) = chk->data;
2986 	/* Steal off the mbuf */
2987 	chk->data = NULL;
2988 	/*
2989 	 * For this case, we check the actual socket buffer, since the assoc
2990 	 * is going away we don't want to overfill the socket buffer for a
2991 	 * non-reader
2992 	 */
2993 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
2994 		sctp_m_freem(m_notify);
2995 		return;
2996 	}
2997 	/* append to socket */
2998 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2999 	    0, 0, stcb->asoc.context, 0, 0, 0,
3000 	    m_notify);
3001 	if (control == NULL) {
3002 		/* no memory */
3003 		sctp_m_freem(m_notify);
3004 		return;
3005 	}
3006 	control->length = SCTP_BUF_LEN(m_notify);
3007 	control->spec_flags = M_NOTIFICATION;
3008 	/* not that we need this */
3009 	control->tail_mbuf = m_notify;
3010 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3011 	    control,
3012 	    &stcb->sctp_socket->so_rcv, 1,
3013 	    SCTP_READ_LOCK_NOT_HELD,
3014 	    so_locked);
3015 }
3016 
3017 
3018 static void
3019 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3020     struct sctp_stream_queue_pending *sp, int so_locked
3021 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3022     SCTP_UNUSED
3023 #endif
3024 )
3025 {
3026 	struct mbuf *m_notify;
3027 	struct sctp_send_failed *ssf;
3028 	struct sctp_send_failed_event *ssfe;
3029 	struct sctp_queued_to_read *control;
3030 	int notifhdr_len;
3031 
3032 	if ((stcb == NULL) ||
3033 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3034 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3035 		/* event not enabled */
3036 		return;
3037 	}
3038 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3039 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3040 	} else {
3041 		notifhdr_len = sizeof(struct sctp_send_failed);
3042 	}
3043 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3044 	if (m_notify == NULL) {
3045 		/* no space left */
3046 		return;
3047 	}
3048 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3049 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3050 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3051 		memset(ssfe, 0, notifhdr_len);
3052 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3053 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3054 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3055 		ssfe->ssfe_error = error;
3056 		/* not exactly what the user sent in, but should be close :) */
3057 		ssfe->ssfe_info.snd_sid = sp->sid;
3058 		if (sp->some_taken) {
3059 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3060 		} else {
3061 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3062 		}
3063 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3064 		ssfe->ssfe_info.snd_context = sp->context;
3065 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3066 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3067 	} else {
3068 		ssf = mtod(m_notify, struct sctp_send_failed *);
3069 		memset(ssf, 0, notifhdr_len);
3070 		ssf->ssf_type = SCTP_SEND_FAILED;
3071 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3072 		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3073 		ssf->ssf_error = error;
3074 		/* not exactly what the user sent in, but should be close :) */
3075 		ssf->ssf_info.sinfo_stream = sp->sid;
3076 		ssf->ssf_info.sinfo_ssn = 0;
3077 		if (sp->some_taken) {
3078 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3079 		} else {
3080 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3081 		}
3082 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3083 		ssf->ssf_info.sinfo_context = sp->context;
3084 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3085 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3086 	}
3087 	SCTP_BUF_NEXT(m_notify) = sp->data;
3088 
3089 	/* Steal off the mbuf */
3090 	sp->data = NULL;
3091 	/*
3092 	 * For this case, we check the actual socket buffer, since the assoc
3093 	 * is going away we don't want to overfill the socket buffer for a
3094 	 * non-reader
3095 	 */
3096 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3097 		sctp_m_freem(m_notify);
3098 		return;
3099 	}
3100 	/* append to socket */
3101 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3102 	    0, 0, stcb->asoc.context, 0, 0, 0,
3103 	    m_notify);
3104 	if (control == NULL) {
3105 		/* no memory */
3106 		sctp_m_freem(m_notify);
3107 		return;
3108 	}
3109 	control->length = SCTP_BUF_LEN(m_notify);
3110 	control->spec_flags = M_NOTIFICATION;
3111 	/* not that we need this */
3112 	control->tail_mbuf = m_notify;
3113 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3114 	    control,
3115 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3116 }
3117 
3118 
3119 
3120 static void
3121 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3122 {
3123 	struct mbuf *m_notify;
3124 	struct sctp_adaptation_event *sai;
3125 	struct sctp_queued_to_read *control;
3126 
3127 	if ((stcb == NULL) ||
3128 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3129 		/* event not enabled */
3130 		return;
3131 	}
3132 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3133 	if (m_notify == NULL)
3134 		/* no space left */
3135 		return;
3136 	SCTP_BUF_LEN(m_notify) = 0;
3137 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3138 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3139 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3140 	sai->sai_flags = 0;
3141 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3142 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3143 	sai->sai_assoc_id = sctp_get_associd(stcb);
3144 
3145 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3146 	SCTP_BUF_NEXT(m_notify) = NULL;
3147 
3148 	/* append to socket */
3149 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3150 	    0, 0, stcb->asoc.context, 0, 0, 0,
3151 	    m_notify);
3152 	if (control == NULL) {
3153 		/* no memory */
3154 		sctp_m_freem(m_notify);
3155 		return;
3156 	}
3157 	control->length = SCTP_BUF_LEN(m_notify);
3158 	control->spec_flags = M_NOTIFICATION;
3159 	/* not that we need this */
3160 	control->tail_mbuf = m_notify;
3161 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3162 	    control,
3163 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3164 }
3165 
3166 /* This always must be called with the read-queue LOCKED in the INP */
3167 static void
3168 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3169     uint32_t val, int so_locked
3170 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3171     SCTP_UNUSED
3172 #endif
3173 )
3174 {
3175 	struct mbuf *m_notify;
3176 	struct sctp_pdapi_event *pdapi;
3177 	struct sctp_queued_to_read *control;
3178 	struct sockbuf *sb;
3179 
3180 	if ((stcb == NULL) ||
3181 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3182 		/* event not enabled */
3183 		return;
3184 	}
3185 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3186 		return;
3187 	}
3188 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3189 	if (m_notify == NULL)
3190 		/* no space left */
3191 		return;
3192 	SCTP_BUF_LEN(m_notify) = 0;
3193 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3194 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3195 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3196 	pdapi->pdapi_flags = 0;
3197 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3198 	pdapi->pdapi_indication = error;
3199 	pdapi->pdapi_stream = (val >> 16);
3200 	pdapi->pdapi_seq = (val & 0x0000ffff);
3201 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3202 
3203 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3204 	SCTP_BUF_NEXT(m_notify) = NULL;
3205 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3206 	    0, 0, stcb->asoc.context, 0, 0, 0,
3207 	    m_notify);
3208 	if (control == NULL) {
3209 		/* no memory */
3210 		sctp_m_freem(m_notify);
3211 		return;
3212 	}
3213 	control->length = SCTP_BUF_LEN(m_notify);
3214 	control->spec_flags = M_NOTIFICATION;
3215 	/* not that we need this */
3216 	control->tail_mbuf = m_notify;
3217 	sb = &stcb->sctp_socket->so_rcv;
3218 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3219 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3220 	}
3221 	sctp_sballoc(stcb, sb, m_notify);
3222 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3223 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3224 	}
3225 	control->end_added = 1;
3226 	if (stcb->asoc.control_pdapi)
3227 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3228 	else {
3229 		/* we really should not see this case */
3230 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3231 	}
3232 	if (stcb->sctp_ep && stcb->sctp_socket) {
3233 		/* This should always be the case */
3234 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3235 		struct socket *so;
3236 
3237 		so = SCTP_INP_SO(stcb->sctp_ep);
3238 		if (!so_locked) {
3239 			atomic_add_int(&stcb->asoc.refcnt, 1);
3240 			SCTP_TCB_UNLOCK(stcb);
3241 			SCTP_SOCKET_LOCK(so, 1);
3242 			SCTP_TCB_LOCK(stcb);
3243 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3244 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3245 				SCTP_SOCKET_UNLOCK(so, 1);
3246 				return;
3247 			}
3248 		}
3249 #endif
3250 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3251 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3252 		if (!so_locked) {
3253 			SCTP_SOCKET_UNLOCK(so, 1);
3254 		}
3255 #endif
3256 	}
3257 }
3258 
3259 static void
3260 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3261 {
3262 	struct mbuf *m_notify;
3263 	struct sctp_shutdown_event *sse;
3264 	struct sctp_queued_to_read *control;
3265 
3266 	/*
3267 	 * For TCP model AND UDP connected sockets we will send an error up
3268 	 * when an SHUTDOWN completes
3269 	 */
3270 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3271 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3272 		/* mark socket closed for read/write and wakeup! */
3273 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3274 		struct socket *so;
3275 
3276 		so = SCTP_INP_SO(stcb->sctp_ep);
3277 		atomic_add_int(&stcb->asoc.refcnt, 1);
3278 		SCTP_TCB_UNLOCK(stcb);
3279 		SCTP_SOCKET_LOCK(so, 1);
3280 		SCTP_TCB_LOCK(stcb);
3281 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3282 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3283 			SCTP_SOCKET_UNLOCK(so, 1);
3284 			return;
3285 		}
3286 #endif
3287 		socantsendmore(stcb->sctp_socket);
3288 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3289 		SCTP_SOCKET_UNLOCK(so, 1);
3290 #endif
3291 	}
3292 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3293 		/* event not enabled */
3294 		return;
3295 	}
3296 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3297 	if (m_notify == NULL)
3298 		/* no space left */
3299 		return;
3300 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3301 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3302 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3303 	sse->sse_flags = 0;
3304 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3305 	sse->sse_assoc_id = sctp_get_associd(stcb);
3306 
3307 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3308 	SCTP_BUF_NEXT(m_notify) = NULL;
3309 
3310 	/* append to socket */
3311 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3312 	    0, 0, stcb->asoc.context, 0, 0, 0,
3313 	    m_notify);
3314 	if (control == NULL) {
3315 		/* no memory */
3316 		sctp_m_freem(m_notify);
3317 		return;
3318 	}
3319 	control->length = SCTP_BUF_LEN(m_notify);
3320 	control->spec_flags = M_NOTIFICATION;
3321 	/* not that we need this */
3322 	control->tail_mbuf = m_notify;
3323 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3324 	    control,
3325 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3326 }
3327 
3328 static void
3329 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3330     int so_locked
3331 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3332     SCTP_UNUSED
3333 #endif
3334 )
3335 {
3336 	struct mbuf *m_notify;
3337 	struct sctp_sender_dry_event *event;
3338 	struct sctp_queued_to_read *control;
3339 
3340 	if ((stcb == NULL) ||
3341 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3342 		/* event not enabled */
3343 		return;
3344 	}
3345 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3346 	if (m_notify == NULL) {
3347 		/* no space left */
3348 		return;
3349 	}
3350 	SCTP_BUF_LEN(m_notify) = 0;
3351 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3352 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3353 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3354 	event->sender_dry_flags = 0;
3355 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3356 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3357 
3358 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3359 	SCTP_BUF_NEXT(m_notify) = NULL;
3360 
3361 	/* append to socket */
3362 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3363 	    0, 0, stcb->asoc.context, 0, 0, 0,
3364 	    m_notify);
3365 	if (control == NULL) {
3366 		/* no memory */
3367 		sctp_m_freem(m_notify);
3368 		return;
3369 	}
3370 	control->length = SCTP_BUF_LEN(m_notify);
3371 	control->spec_flags = M_NOTIFICATION;
3372 	/* not that we need this */
3373 	control->tail_mbuf = m_notify;
3374 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3375 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3376 }
3377 
3378 
3379 void
3380 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3381 {
3382 	struct mbuf *m_notify;
3383 	struct sctp_queued_to_read *control;
3384 	struct sctp_stream_change_event *stradd;
3385 
3386 	if ((stcb == NULL) ||
3387 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3388 		/* event not enabled */
3389 		return;
3390 	}
3391 	if ((stcb->asoc.peer_req_out) && flag) {
3392 		/* Peer made the request, don't tell the local user */
3393 		stcb->asoc.peer_req_out = 0;
3394 		return;
3395 	}
3396 	stcb->asoc.peer_req_out = 0;
3397 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3398 	if (m_notify == NULL)
3399 		/* no space left */
3400 		return;
3401 	SCTP_BUF_LEN(m_notify) = 0;
3402 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3403 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3404 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3405 	stradd->strchange_flags = flag;
3406 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3407 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3408 	stradd->strchange_instrms = numberin;
3409 	stradd->strchange_outstrms = numberout;
3410 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3411 	SCTP_BUF_NEXT(m_notify) = NULL;
3412 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3413 		/* no space */
3414 		sctp_m_freem(m_notify);
3415 		return;
3416 	}
3417 	/* append to socket */
3418 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3419 	    0, 0, stcb->asoc.context, 0, 0, 0,
3420 	    m_notify);
3421 	if (control == NULL) {
3422 		/* no memory */
3423 		sctp_m_freem(m_notify);
3424 		return;
3425 	}
3426 	control->length = SCTP_BUF_LEN(m_notify);
3427 	control->spec_flags = M_NOTIFICATION;
3428 	/* not that we need this */
3429 	control->tail_mbuf = m_notify;
3430 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3431 	    control,
3432 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3433 }
3434 
3435 void
3436 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3437 {
3438 	struct mbuf *m_notify;
3439 	struct sctp_queued_to_read *control;
3440 	struct sctp_assoc_reset_event *strasoc;
3441 
3442 	if ((stcb == NULL) ||
3443 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3444 		/* event not enabled */
3445 		return;
3446 	}
3447 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3448 	if (m_notify == NULL)
3449 		/* no space left */
3450 		return;
3451 	SCTP_BUF_LEN(m_notify) = 0;
3452 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3453 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3454 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3455 	strasoc->assocreset_flags = flag;
3456 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3457 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3458 	strasoc->assocreset_local_tsn = sending_tsn;
3459 	strasoc->assocreset_remote_tsn = recv_tsn;
3460 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3461 	SCTP_BUF_NEXT(m_notify) = NULL;
3462 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3463 		/* no space */
3464 		sctp_m_freem(m_notify);
3465 		return;
3466 	}
3467 	/* append to socket */
3468 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3469 	    0, 0, stcb->asoc.context, 0, 0, 0,
3470 	    m_notify);
3471 	if (control == NULL) {
3472 		/* no memory */
3473 		sctp_m_freem(m_notify);
3474 		return;
3475 	}
3476 	control->length = SCTP_BUF_LEN(m_notify);
3477 	control->spec_flags = M_NOTIFICATION;
3478 	/* not that we need this */
3479 	control->tail_mbuf = m_notify;
3480 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3481 	    control,
3482 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3483 }
3484 
3485 
3486 
3487 static void
3488 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3489     int number_entries, uint16_t *list, int flag)
3490 {
3491 	struct mbuf *m_notify;
3492 	struct sctp_queued_to_read *control;
3493 	struct sctp_stream_reset_event *strreset;
3494 	int len;
3495 
3496 	if ((stcb == NULL) ||
3497 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3498 		/* event not enabled */
3499 		return;
3500 	}
3501 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3502 	if (m_notify == NULL)
3503 		/* no space left */
3504 		return;
3505 	SCTP_BUF_LEN(m_notify) = 0;
3506 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3507 	if (len > M_TRAILINGSPACE(m_notify)) {
3508 		/* never enough room */
3509 		sctp_m_freem(m_notify);
3510 		return;
3511 	}
3512 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3513 	memset(strreset, 0, len);
3514 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3515 	strreset->strreset_flags = flag;
3516 	strreset->strreset_length = len;
3517 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3518 	if (number_entries) {
3519 		int i;
3520 
3521 		for (i = 0; i < number_entries; i++) {
3522 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3523 		}
3524 	}
3525 	SCTP_BUF_LEN(m_notify) = len;
3526 	SCTP_BUF_NEXT(m_notify) = NULL;
3527 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3528 		/* no space */
3529 		sctp_m_freem(m_notify);
3530 		return;
3531 	}
3532 	/* append to socket */
3533 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3534 	    0, 0, stcb->asoc.context, 0, 0, 0,
3535 	    m_notify);
3536 	if (control == NULL) {
3537 		/* no memory */
3538 		sctp_m_freem(m_notify);
3539 		return;
3540 	}
3541 	control->length = SCTP_BUF_LEN(m_notify);
3542 	control->spec_flags = M_NOTIFICATION;
3543 	/* not that we need this */
3544 	control->tail_mbuf = m_notify;
3545 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3546 	    control,
3547 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3548 }
3549 
3550 
3551 static void
3552 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3553 {
3554 	struct mbuf *m_notify;
3555 	struct sctp_remote_error *sre;
3556 	struct sctp_queued_to_read *control;
3557 	unsigned int notif_len;
3558 	uint16_t chunk_len;
3559 
3560 	if ((stcb == NULL) ||
3561 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3562 		return;
3563 	}
3564 	if (chunk != NULL) {
3565 		chunk_len = ntohs(chunk->ch.chunk_length);
3566 	} else {
3567 		chunk_len = 0;
3568 	}
3569 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3570 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3571 	if (m_notify == NULL) {
3572 		/* Retry with smaller value. */
3573 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3574 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3575 		if (m_notify == NULL) {
3576 			return;
3577 		}
3578 	}
3579 	SCTP_BUF_NEXT(m_notify) = NULL;
3580 	sre = mtod(m_notify, struct sctp_remote_error *);
3581 	memset(sre, 0, notif_len);
3582 	sre->sre_type = SCTP_REMOTE_ERROR;
3583 	sre->sre_flags = 0;
3584 	sre->sre_length = sizeof(struct sctp_remote_error);
3585 	sre->sre_error = error;
3586 	sre->sre_assoc_id = sctp_get_associd(stcb);
3587 	if (notif_len > sizeof(struct sctp_remote_error)) {
3588 		memcpy(sre->sre_data, chunk, chunk_len);
3589 		sre->sre_length += chunk_len;
3590 	}
3591 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3592 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3593 	    0, 0, stcb->asoc.context, 0, 0, 0,
3594 	    m_notify);
3595 	if (control != NULL) {
3596 		control->length = SCTP_BUF_LEN(m_notify);
3597 		control->spec_flags = M_NOTIFICATION;
3598 		/* not that we need this */
3599 		control->tail_mbuf = m_notify;
3600 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3601 		    control,
3602 		    &stcb->sctp_socket->so_rcv, 1,
3603 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3604 	} else {
3605 		sctp_m_freem(m_notify);
3606 	}
3607 }
3608 
3609 
3610 void
3611 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3612     uint32_t error, void *data, int so_locked
3613 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3614     SCTP_UNUSED
3615 #endif
3616 )
3617 {
3618 	if ((stcb == NULL) ||
3619 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3620 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3621 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3622 		/* If the socket is gone we are out of here */
3623 		return;
3624 	}
3625 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3626 		return;
3627 	}
3628 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3629 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3630 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3631 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3632 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3633 			/* Don't report these in front states */
3634 			return;
3635 		}
3636 	}
3637 	switch (notification) {
3638 	case SCTP_NOTIFY_ASSOC_UP:
3639 		if (stcb->asoc.assoc_up_sent == 0) {
3640 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3641 			stcb->asoc.assoc_up_sent = 1;
3642 		}
3643 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3644 			sctp_notify_adaptation_layer(stcb);
3645 		}
3646 		if (stcb->asoc.auth_supported == 0) {
3647 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3648 			    NULL, so_locked);
3649 		}
3650 		break;
3651 	case SCTP_NOTIFY_ASSOC_DOWN:
3652 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3653 		break;
3654 	case SCTP_NOTIFY_INTERFACE_DOWN:
3655 		{
3656 			struct sctp_nets *net;
3657 
3658 			net = (struct sctp_nets *)data;
3659 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3660 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3661 			break;
3662 		}
3663 	case SCTP_NOTIFY_INTERFACE_UP:
3664 		{
3665 			struct sctp_nets *net;
3666 
3667 			net = (struct sctp_nets *)data;
3668 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3669 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3670 			break;
3671 		}
3672 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3673 		{
3674 			struct sctp_nets *net;
3675 
3676 			net = (struct sctp_nets *)data;
3677 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3678 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3679 			break;
3680 		}
3681 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3682 		sctp_notify_send_failed2(stcb, error,
3683 		    (struct sctp_stream_queue_pending *)data, so_locked);
3684 		break;
3685 	case SCTP_NOTIFY_SENT_DG_FAIL:
3686 		sctp_notify_send_failed(stcb, 1, error,
3687 		    (struct sctp_tmit_chunk *)data, so_locked);
3688 		break;
3689 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3690 		sctp_notify_send_failed(stcb, 0, error,
3691 		    (struct sctp_tmit_chunk *)data, so_locked);
3692 		break;
3693 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3694 		{
3695 			uint32_t val;
3696 
3697 			val = *((uint32_t *)data);
3698 
3699 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3700 			break;
3701 		}
3702 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3703 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3704 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3705 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3706 		} else {
3707 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3708 		}
3709 		break;
3710 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3711 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3712 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3713 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3714 		} else {
3715 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3716 		}
3717 		break;
3718 	case SCTP_NOTIFY_ASSOC_RESTART:
3719 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3720 		if (stcb->asoc.auth_supported == 0) {
3721 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3722 			    NULL, so_locked);
3723 		}
3724 		break;
3725 	case SCTP_NOTIFY_STR_RESET_SEND:
3726 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
3727 		break;
3728 	case SCTP_NOTIFY_STR_RESET_RECV:
3729 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
3730 		break;
3731 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3732 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3733 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3734 		break;
3735 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3736 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3737 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3738 		break;
3739 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3740 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3741 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3742 		break;
3743 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3744 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3745 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3746 		break;
3747 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3748 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3749 		    error, so_locked);
3750 		break;
3751 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3752 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3753 		    error, so_locked);
3754 		break;
3755 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3756 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3757 		    error, so_locked);
3758 		break;
3759 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3760 		sctp_notify_shutdown_event(stcb);
3761 		break;
3762 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3763 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3764 		    (uint16_t)(uintptr_t)data,
3765 		    so_locked);
3766 		break;
3767 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3768 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3769 		    (uint16_t)(uintptr_t)data,
3770 		    so_locked);
3771 		break;
3772 	case SCTP_NOTIFY_NO_PEER_AUTH:
3773 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3774 		    (uint16_t)(uintptr_t)data,
3775 		    so_locked);
3776 		break;
3777 	case SCTP_NOTIFY_SENDER_DRY:
3778 		sctp_notify_sender_dry_event(stcb, so_locked);
3779 		break;
3780 	case SCTP_NOTIFY_REMOTE_ERROR:
3781 		sctp_notify_remote_error(stcb, error, data);
3782 		break;
3783 	default:
3784 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3785 		    __func__, notification, notification);
3786 		break;
3787 	}			/* end switch */
3788 }
3789 
3790 void
3791 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3792 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3793     SCTP_UNUSED
3794 #endif
3795 )
3796 {
3797 	struct sctp_association *asoc;
3798 	struct sctp_stream_out *outs;
3799 	struct sctp_tmit_chunk *chk, *nchk;
3800 	struct sctp_stream_queue_pending *sp, *nsp;
3801 	int i;
3802 
3803 	if (stcb == NULL) {
3804 		return;
3805 	}
3806 	asoc = &stcb->asoc;
3807 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3808 		/* already being freed */
3809 		return;
3810 	}
3811 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3812 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3813 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3814 		return;
3815 	}
3816 	/* now through all the gunk freeing chunks */
3817 	if (holds_lock == 0) {
3818 		SCTP_TCB_SEND_LOCK(stcb);
3819 	}
3820 	/* sent queue SHOULD be empty */
3821 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3822 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3823 		asoc->sent_queue_cnt--;
3824 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3825 			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3826 				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3827 #ifdef INVARIANTS
3828 			} else {
3829 				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3830 #endif
3831 			}
3832 		}
3833 		if (chk->data != NULL) {
3834 			sctp_free_bufspace(stcb, asoc, chk, 1);
3835 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3836 			    error, chk, so_locked);
3837 			if (chk->data) {
3838 				sctp_m_freem(chk->data);
3839 				chk->data = NULL;
3840 			}
3841 		}
3842 		sctp_free_a_chunk(stcb, chk, so_locked);
3843 		/* sa_ignore FREED_MEMORY */
3844 	}
3845 	/* pending send queue SHOULD be empty */
3846 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3847 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3848 		asoc->send_queue_cnt--;
3849 		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3850 			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3851 #ifdef INVARIANTS
3852 		} else {
3853 			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3854 #endif
3855 		}
3856 		if (chk->data != NULL) {
3857 			sctp_free_bufspace(stcb, asoc, chk, 1);
3858 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3859 			    error, chk, so_locked);
3860 			if (chk->data) {
3861 				sctp_m_freem(chk->data);
3862 				chk->data = NULL;
3863 			}
3864 		}
3865 		sctp_free_a_chunk(stcb, chk, so_locked);
3866 		/* sa_ignore FREED_MEMORY */
3867 	}
3868 	for (i = 0; i < asoc->streamoutcnt; i++) {
3869 		/* For each stream */
3870 		outs = &asoc->strmout[i];
3871 		/* clean up any sends there */
3872 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3873 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
3874 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3875 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
3876 			sctp_free_spbufspace(stcb, asoc, sp);
3877 			if (sp->data) {
3878 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3879 				    error, (void *)sp, so_locked);
3880 				if (sp->data) {
3881 					sctp_m_freem(sp->data);
3882 					sp->data = NULL;
3883 					sp->tail_mbuf = NULL;
3884 					sp->length = 0;
3885 				}
3886 			}
3887 			if (sp->net) {
3888 				sctp_free_remote_addr(sp->net);
3889 				sp->net = NULL;
3890 			}
3891 			/* Free the chunk */
3892 			sctp_free_a_strmoq(stcb, sp, so_locked);
3893 			/* sa_ignore FREED_MEMORY */
3894 		}
3895 	}
3896 
3897 	if (holds_lock == 0) {
3898 		SCTP_TCB_SEND_UNLOCK(stcb);
3899 	}
3900 }
3901 
3902 void
3903 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3904     struct sctp_abort_chunk *abort, int so_locked
3905 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3906     SCTP_UNUSED
3907 #endif
3908 )
3909 {
3910 	if (stcb == NULL) {
3911 		return;
3912 	}
3913 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3914 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3915 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3916 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3917 	}
3918 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3919 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3920 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3921 		return;
3922 	}
3923 	/* Tell them we lost the asoc */
3924 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3925 	if (from_peer) {
3926 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3927 	} else {
3928 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3929 	}
3930 }
3931 
3932 void
3933 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3934     struct mbuf *m, int iphlen,
3935     struct sockaddr *src, struct sockaddr *dst,
3936     struct sctphdr *sh, struct mbuf *op_err,
3937     uint8_t mflowtype, uint32_t mflowid,
3938     uint32_t vrf_id, uint16_t port)
3939 {
3940 	uint32_t vtag;
3941 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3942 	struct socket *so;
3943 #endif
3944 
3945 	vtag = 0;
3946 	if (stcb != NULL) {
3947 		vtag = stcb->asoc.peer_vtag;
3948 		vrf_id = stcb->asoc.vrf_id;
3949 	}
3950 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
3951 	    mflowtype, mflowid, inp->fibnum,
3952 	    vrf_id, port);
3953 	if (stcb != NULL) {
3954 		/* We have a TCB to abort, send notification too */
3955 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
3956 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3957 		/* Ok, now lets free it */
3958 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3959 		so = SCTP_INP_SO(inp);
3960 		atomic_add_int(&stcb->asoc.refcnt, 1);
3961 		SCTP_TCB_UNLOCK(stcb);
3962 		SCTP_SOCKET_LOCK(so, 1);
3963 		SCTP_TCB_LOCK(stcb);
3964 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3965 #endif
3966 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3967 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3968 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3969 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3970 		}
3971 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
3972 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
3973 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3974 		SCTP_SOCKET_UNLOCK(so, 1);
3975 #endif
3976 	}
3977 }
3978 #ifdef SCTP_ASOCLOG_OF_TSNS
3979 void
3980 sctp_print_out_track_log(struct sctp_tcb *stcb)
3981 {
3982 #ifdef NOSIY_PRINTS
3983 	int i;
3984 
3985 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
3986 	SCTP_PRINTF("IN bound TSN log-aaa\n");
3987 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
3988 		SCTP_PRINTF("None rcvd\n");
3989 		goto none_in;
3990 	}
3991 	if (stcb->asoc.tsn_in_wrapped) {
3992 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
3993 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3994 			    stcb->asoc.in_tsnlog[i].tsn,
3995 			    stcb->asoc.in_tsnlog[i].strm,
3996 			    stcb->asoc.in_tsnlog[i].seq,
3997 			    stcb->asoc.in_tsnlog[i].flgs,
3998 			    stcb->asoc.in_tsnlog[i].sz);
3999 		}
4000 	}
4001 	if (stcb->asoc.tsn_in_at) {
4002 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4003 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4004 			    stcb->asoc.in_tsnlog[i].tsn,
4005 			    stcb->asoc.in_tsnlog[i].strm,
4006 			    stcb->asoc.in_tsnlog[i].seq,
4007 			    stcb->asoc.in_tsnlog[i].flgs,
4008 			    stcb->asoc.in_tsnlog[i].sz);
4009 		}
4010 	}
4011 none_in:
4012 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4013 	if ((stcb->asoc.tsn_out_at == 0) &&
4014 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4015 		SCTP_PRINTF("None sent\n");
4016 	}
4017 	if (stcb->asoc.tsn_out_wrapped) {
4018 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4019 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4020 			    stcb->asoc.out_tsnlog[i].tsn,
4021 			    stcb->asoc.out_tsnlog[i].strm,
4022 			    stcb->asoc.out_tsnlog[i].seq,
4023 			    stcb->asoc.out_tsnlog[i].flgs,
4024 			    stcb->asoc.out_tsnlog[i].sz);
4025 		}
4026 	}
4027 	if (stcb->asoc.tsn_out_at) {
4028 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4029 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4030 			    stcb->asoc.out_tsnlog[i].tsn,
4031 			    stcb->asoc.out_tsnlog[i].strm,
4032 			    stcb->asoc.out_tsnlog[i].seq,
4033 			    stcb->asoc.out_tsnlog[i].flgs,
4034 			    stcb->asoc.out_tsnlog[i].sz);
4035 		}
4036 	}
4037 #endif
4038 }
4039 #endif
4040 
4041 void
4042 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4043     struct mbuf *op_err,
4044     int so_locked
4045 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4046     SCTP_UNUSED
4047 #endif
4048 )
4049 {
4050 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4051 	struct socket *so;
4052 #endif
4053 
4054 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4055 	so = SCTP_INP_SO(inp);
4056 #endif
4057 	if (stcb == NULL) {
4058 		/* Got to have a TCB */
4059 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4060 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4061 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4062 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4063 			}
4064 		}
4065 		return;
4066 	} else {
4067 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4068 	}
4069 	/* notify the peer */
4070 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4071 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4072 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4073 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4074 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4075 	}
4076 	/* notify the ulp */
4077 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4078 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4079 	}
4080 	/* now free the asoc */
4081 #ifdef SCTP_ASOCLOG_OF_TSNS
4082 	sctp_print_out_track_log(stcb);
4083 #endif
4084 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4085 	if (!so_locked) {
4086 		atomic_add_int(&stcb->asoc.refcnt, 1);
4087 		SCTP_TCB_UNLOCK(stcb);
4088 		SCTP_SOCKET_LOCK(so, 1);
4089 		SCTP_TCB_LOCK(stcb);
4090 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4091 	}
4092 #endif
4093 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4094 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4095 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4096 	if (!so_locked) {
4097 		SCTP_SOCKET_UNLOCK(so, 1);
4098 	}
4099 #endif
4100 }
4101 
4102 void
4103 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4104     struct sockaddr *src, struct sockaddr *dst,
4105     struct sctphdr *sh, struct sctp_inpcb *inp,
4106     struct mbuf *cause,
4107     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4108     uint32_t vrf_id, uint16_t port)
4109 {
4110 	struct sctp_chunkhdr *ch, chunk_buf;
4111 	unsigned int chk_length;
4112 	int contains_init_chunk;
4113 
4114 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4115 	/* Generate a TO address for future reference */
4116 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4117 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4118 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4119 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4120 		}
4121 	}
4122 	contains_init_chunk = 0;
4123 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4124 	    sizeof(*ch), (uint8_t *)&chunk_buf);
4125 	while (ch != NULL) {
4126 		chk_length = ntohs(ch->chunk_length);
4127 		if (chk_length < sizeof(*ch)) {
4128 			/* break to abort land */
4129 			break;
4130 		}
4131 		switch (ch->chunk_type) {
4132 		case SCTP_INIT:
4133 			contains_init_chunk = 1;
4134 			break;
4135 		case SCTP_PACKET_DROPPED:
4136 			/* we don't respond to pkt-dropped */
4137 			return;
4138 		case SCTP_ABORT_ASSOCIATION:
4139 			/* we don't respond with an ABORT to an ABORT */
4140 			return;
4141 		case SCTP_SHUTDOWN_COMPLETE:
4142 			/*
4143 			 * we ignore it since we are not waiting for it and
4144 			 * peer is gone
4145 			 */
4146 			return;
4147 		case SCTP_SHUTDOWN_ACK:
4148 			sctp_send_shutdown_complete2(src, dst, sh,
4149 			    mflowtype, mflowid, fibnum,
4150 			    vrf_id, port);
4151 			return;
4152 		default:
4153 			break;
4154 		}
4155 		offset += SCTP_SIZE32(chk_length);
4156 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4157 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4158 	}
4159 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4160 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4161 	    (contains_init_chunk == 0))) {
4162 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4163 		    mflowtype, mflowid, fibnum,
4164 		    vrf_id, port);
4165 	}
4166 }
4167 
4168 /*
4169  * check the inbound datagram to make sure there is not an abort inside it,
4170  * if there is return 1, else return 0.
4171  */
4172 int
4173 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4174 {
4175 	struct sctp_chunkhdr *ch;
4176 	struct sctp_init_chunk *init_chk, chunk_buf;
4177 	int offset;
4178 	unsigned int chk_length;
4179 
4180 	offset = iphlen + sizeof(struct sctphdr);
4181 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4182 	    (uint8_t *)&chunk_buf);
4183 	while (ch != NULL) {
4184 		chk_length = ntohs(ch->chunk_length);
4185 		if (chk_length < sizeof(*ch)) {
4186 			/* packet is probably corrupt */
4187 			break;
4188 		}
4189 		/* we seem to be ok, is it an abort? */
4190 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4191 			/* yep, tell them */
4192 			return (1);
4193 		}
4194 		if (ch->chunk_type == SCTP_INITIATION) {
4195 			/* need to update the Vtag */
4196 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4197 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4198 			if (init_chk != NULL) {
4199 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4200 			}
4201 		}
4202 		/* Nope, move to the next chunk */
4203 		offset += SCTP_SIZE32(chk_length);
4204 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4205 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4206 	}
4207 	return (0);
4208 }
4209 
4210 /*
4211  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4212  * set (i.e. it's 0) so, create this function to compare link local scopes
4213  */
4214 #ifdef INET6
4215 uint32_t
4216 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4217 {
4218 	struct sockaddr_in6 a, b;
4219 
4220 	/* save copies */
4221 	a = *addr1;
4222 	b = *addr2;
4223 
4224 	if (a.sin6_scope_id == 0)
4225 		if (sa6_recoverscope(&a)) {
4226 			/* can't get scope, so can't match */
4227 			return (0);
4228 		}
4229 	if (b.sin6_scope_id == 0)
4230 		if (sa6_recoverscope(&b)) {
4231 			/* can't get scope, so can't match */
4232 			return (0);
4233 		}
4234 	if (a.sin6_scope_id != b.sin6_scope_id)
4235 		return (0);
4236 
4237 	return (1);
4238 }
4239 
4240 /*
4241  * returns a sockaddr_in6 with embedded scope recovered and removed
4242  */
4243 struct sockaddr_in6 *
4244 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4245 {
4246 	/* check and strip embedded scope junk */
4247 	if (addr->sin6_family == AF_INET6) {
4248 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4249 			if (addr->sin6_scope_id == 0) {
4250 				*store = *addr;
4251 				if (!sa6_recoverscope(store)) {
4252 					/* use the recovered scope */
4253 					addr = store;
4254 				}
4255 			} else {
4256 				/* else, return the original "to" addr */
4257 				in6_clearscope(&addr->sin6_addr);
4258 			}
4259 		}
4260 	}
4261 	return (addr);
4262 }
4263 #endif
4264 
4265 /*
4266  * are the two addresses the same?  currently a "scopeless" check returns: 1
4267  * if same, 0 if not
4268  */
4269 int
4270 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4271 {
4272 
4273 	/* must be valid */
4274 	if (sa1 == NULL || sa2 == NULL)
4275 		return (0);
4276 
4277 	/* must be the same family */
4278 	if (sa1->sa_family != sa2->sa_family)
4279 		return (0);
4280 
4281 	switch (sa1->sa_family) {
4282 #ifdef INET6
4283 	case AF_INET6:
4284 		{
4285 			/* IPv6 addresses */
4286 			struct sockaddr_in6 *sin6_1, *sin6_2;
4287 
4288 			sin6_1 = (struct sockaddr_in6 *)sa1;
4289 			sin6_2 = (struct sockaddr_in6 *)sa2;
4290 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4291 			    sin6_2));
4292 		}
4293 #endif
4294 #ifdef INET
4295 	case AF_INET:
4296 		{
4297 			/* IPv4 addresses */
4298 			struct sockaddr_in *sin_1, *sin_2;
4299 
4300 			sin_1 = (struct sockaddr_in *)sa1;
4301 			sin_2 = (struct sockaddr_in *)sa2;
4302 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4303 		}
4304 #endif
4305 	default:
4306 		/* we don't do these... */
4307 		return (0);
4308 	}
4309 }
4310 
4311 void
4312 sctp_print_address(struct sockaddr *sa)
4313 {
4314 #ifdef INET6
4315 	char ip6buf[INET6_ADDRSTRLEN];
4316 #endif
4317 
4318 	switch (sa->sa_family) {
4319 #ifdef INET6
4320 	case AF_INET6:
4321 		{
4322 			struct sockaddr_in6 *sin6;
4323 
4324 			sin6 = (struct sockaddr_in6 *)sa;
4325 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4326 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4327 			    ntohs(sin6->sin6_port),
4328 			    sin6->sin6_scope_id);
4329 			break;
4330 		}
4331 #endif
4332 #ifdef INET
4333 	case AF_INET:
4334 		{
4335 			struct sockaddr_in *sin;
4336 			unsigned char *p;
4337 
4338 			sin = (struct sockaddr_in *)sa;
4339 			p = (unsigned char *)&sin->sin_addr;
4340 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4341 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4342 			break;
4343 		}
4344 #endif
4345 	default:
4346 		SCTP_PRINTF("?\n");
4347 		break;
4348 	}
4349 }
4350 
4351 void
4352 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4353     struct sctp_inpcb *new_inp,
4354     struct sctp_tcb *stcb,
4355     int waitflags)
4356 {
4357 	/*
4358 	 * go through our old INP and pull off any control structures that
4359 	 * belong to stcb and move then to the new inp.
4360 	 */
4361 	struct socket *old_so, *new_so;
4362 	struct sctp_queued_to_read *control, *nctl;
4363 	struct sctp_readhead tmp_queue;
4364 	struct mbuf *m;
4365 	int error = 0;
4366 
4367 	old_so = old_inp->sctp_socket;
4368 	new_so = new_inp->sctp_socket;
4369 	TAILQ_INIT(&tmp_queue);
4370 	error = sblock(&old_so->so_rcv, waitflags);
4371 	if (error) {
4372 		/*
4373 		 * Gak, can't get sblock, we have a problem. data will be
4374 		 * left stranded.. and we don't dare look at it since the
4375 		 * other thread may be reading something. Oh well, its a
4376 		 * screwed up app that does a peeloff OR a accept while
4377 		 * reading from the main socket... actually its only the
4378 		 * peeloff() case, since I think read will fail on a
4379 		 * listening socket..
4380 		 */
4381 		return;
4382 	}
4383 	/* lock the socket buffers */
4384 	SCTP_INP_READ_LOCK(old_inp);
4385 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4386 		/* Pull off all for out target stcb */
4387 		if (control->stcb == stcb) {
4388 			/* remove it we want it */
4389 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4390 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4391 			m = control->data;
4392 			while (m) {
4393 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4394 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4395 				}
4396 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4397 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4398 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4399 				}
4400 				m = SCTP_BUF_NEXT(m);
4401 			}
4402 		}
4403 	}
4404 	SCTP_INP_READ_UNLOCK(old_inp);
4405 	/* Remove the sb-lock on the old socket */
4406 
4407 	sbunlock(&old_so->so_rcv);
4408 	/* Now we move them over to the new socket buffer */
4409 	SCTP_INP_READ_LOCK(new_inp);
4410 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4411 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4412 		m = control->data;
4413 		while (m) {
4414 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4415 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4416 			}
4417 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4418 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4419 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4420 			}
4421 			m = SCTP_BUF_NEXT(m);
4422 		}
4423 	}
4424 	SCTP_INP_READ_UNLOCK(new_inp);
4425 }
4426 
4427 void
4428 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4429     struct sctp_tcb *stcb,
4430     int so_locked
4431 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4432     SCTP_UNUSED
4433 #endif
4434 )
4435 {
4436 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4437 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4438 		struct socket *so;
4439 
4440 		so = SCTP_INP_SO(inp);
4441 		if (!so_locked) {
4442 			if (stcb) {
4443 				atomic_add_int(&stcb->asoc.refcnt, 1);
4444 				SCTP_TCB_UNLOCK(stcb);
4445 			}
4446 			SCTP_SOCKET_LOCK(so, 1);
4447 			if (stcb) {
4448 				SCTP_TCB_LOCK(stcb);
4449 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4450 			}
4451 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4452 				SCTP_SOCKET_UNLOCK(so, 1);
4453 				return;
4454 			}
4455 		}
4456 #endif
4457 		sctp_sorwakeup(inp, inp->sctp_socket);
4458 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4459 		if (!so_locked) {
4460 			SCTP_SOCKET_UNLOCK(so, 1);
4461 		}
4462 #endif
4463 	}
4464 }
4465 
4466 void
4467 sctp_add_to_readq(struct sctp_inpcb *inp,
4468     struct sctp_tcb *stcb,
4469     struct sctp_queued_to_read *control,
4470     struct sockbuf *sb,
4471     int end,
4472     int inp_read_lock_held,
4473     int so_locked
4474 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4475     SCTP_UNUSED
4476 #endif
4477 )
4478 {
4479 	/*
4480 	 * Here we must place the control on the end of the socket read
4481 	 * queue AND increment sb_cc so that select will work properly on
4482 	 * read.
4483 	 */
4484 	struct mbuf *m, *prev = NULL;
4485 
4486 	if (inp == NULL) {
4487 		/* Gak, TSNH!! */
4488 #ifdef INVARIANTS
4489 		panic("Gak, inp NULL on add_to_readq");
4490 #endif
4491 		return;
4492 	}
4493 	if (inp_read_lock_held == 0)
4494 		SCTP_INP_READ_LOCK(inp);
4495 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4496 		sctp_free_remote_addr(control->whoFrom);
4497 		if (control->data) {
4498 			sctp_m_freem(control->data);
4499 			control->data = NULL;
4500 		}
4501 		sctp_free_a_readq(stcb, control);
4502 		if (inp_read_lock_held == 0)
4503 			SCTP_INP_READ_UNLOCK(inp);
4504 		return;
4505 	}
4506 	if (!(control->spec_flags & M_NOTIFICATION)) {
4507 		atomic_add_int(&inp->total_recvs, 1);
4508 		if (!control->do_not_ref_stcb) {
4509 			atomic_add_int(&stcb->total_recvs, 1);
4510 		}
4511 	}
4512 	m = control->data;
4513 	control->held_length = 0;
4514 	control->length = 0;
4515 	while (m) {
4516 		if (SCTP_BUF_LEN(m) == 0) {
4517 			/* Skip mbufs with NO length */
4518 			if (prev == NULL) {
4519 				/* First one */
4520 				control->data = sctp_m_free(m);
4521 				m = control->data;
4522 			} else {
4523 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4524 				m = SCTP_BUF_NEXT(prev);
4525 			}
4526 			if (m == NULL) {
4527 				control->tail_mbuf = prev;
4528 			}
4529 			continue;
4530 		}
4531 		prev = m;
4532 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4533 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4534 		}
4535 		sctp_sballoc(stcb, sb, m);
4536 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4537 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4538 		}
4539 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4540 		m = SCTP_BUF_NEXT(m);
4541 	}
4542 	if (prev != NULL) {
4543 		control->tail_mbuf = prev;
4544 	} else {
4545 		/* Everything got collapsed out?? */
4546 		sctp_free_remote_addr(control->whoFrom);
4547 		sctp_free_a_readq(stcb, control);
4548 		if (inp_read_lock_held == 0)
4549 			SCTP_INP_READ_UNLOCK(inp);
4550 		return;
4551 	}
4552 	if (end) {
4553 		control->end_added = 1;
4554 	}
4555 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4556 	control->on_read_q = 1;
4557 	if (inp_read_lock_held == 0)
4558 		SCTP_INP_READ_UNLOCK(inp);
4559 	if (inp && inp->sctp_socket) {
4560 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
4561 	}
4562 }
4563 
4564 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4565  *************ALTERNATE ROUTING CODE
4566  */
4567 
4568 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4569  *************ALTERNATE ROUTING CODE
4570  */
4571 
4572 struct mbuf *
4573 sctp_generate_cause(uint16_t code, char *info)
4574 {
4575 	struct mbuf *m;
4576 	struct sctp_gen_error_cause *cause;
4577 	size_t info_len;
4578 	uint16_t len;
4579 
4580 	if ((code == 0) || (info == NULL)) {
4581 		return (NULL);
4582 	}
4583 	info_len = strlen(info);
4584 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4585 		return (NULL);
4586 	}
4587 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4588 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4589 	if (m != NULL) {
4590 		SCTP_BUF_LEN(m) = len;
4591 		cause = mtod(m, struct sctp_gen_error_cause *);
4592 		cause->code = htons(code);
4593 		cause->length = htons(len);
4594 		memcpy(cause->info, info, info_len);
4595 	}
4596 	return (m);
4597 }
4598 
4599 struct mbuf *
4600 sctp_generate_no_user_data_cause(uint32_t tsn)
4601 {
4602 	struct mbuf *m;
4603 	struct sctp_error_no_user_data *no_user_data_cause;
4604 	uint16_t len;
4605 
4606 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4607 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4608 	if (m != NULL) {
4609 		SCTP_BUF_LEN(m) = len;
4610 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4611 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4612 		no_user_data_cause->cause.length = htons(len);
4613 		no_user_data_cause->tsn = htonl(tsn);
4614 	}
4615 	return (m);
4616 }
4617 
4618 #ifdef SCTP_MBCNT_LOGGING
4619 void
4620 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4621     struct sctp_tmit_chunk *tp1, int chk_cnt)
4622 {
4623 	if (tp1->data == NULL) {
4624 		return;
4625 	}
4626 	asoc->chunks_on_out_queue -= chk_cnt;
4627 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4628 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4629 		    asoc->total_output_queue_size,
4630 		    tp1->book_size,
4631 		    0,
4632 		    tp1->mbcnt);
4633 	}
4634 	if (asoc->total_output_queue_size >= tp1->book_size) {
4635 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4636 	} else {
4637 		asoc->total_output_queue_size = 0;
4638 	}
4639 
4640 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4641 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4642 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4643 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4644 		} else {
4645 			stcb->sctp_socket->so_snd.sb_cc = 0;
4646 
4647 		}
4648 	}
4649 }
4650 
4651 #endif
4652 
4653 int
4654 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4655     uint8_t sent, int so_locked
4656 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4657     SCTP_UNUSED
4658 #endif
4659 )
4660 {
4661 	struct sctp_stream_out *strq;
4662 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4663 	struct sctp_stream_queue_pending *sp;
4664 	uint32_t mid;
4665 	uint16_t sid;
4666 	uint8_t foundeom = 0;
4667 	int ret_sz = 0;
4668 	int notdone;
4669 	int do_wakeup_routine = 0;
4670 
4671 	sid = tp1->rec.data.sid;
4672 	mid = tp1->rec.data.mid;
4673 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4674 		stcb->asoc.abandoned_sent[0]++;
4675 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4676 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
4677 #if defined(SCTP_DETAILED_STR_STATS)
4678 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4679 #endif
4680 	} else {
4681 		stcb->asoc.abandoned_unsent[0]++;
4682 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4683 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4684 #if defined(SCTP_DETAILED_STR_STATS)
4685 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4686 #endif
4687 	}
4688 	do {
4689 		ret_sz += tp1->book_size;
4690 		if (tp1->data != NULL) {
4691 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4692 				sctp_flight_size_decrease(tp1);
4693 				sctp_total_flight_decrease(stcb, tp1);
4694 			}
4695 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4696 			stcb->asoc.peers_rwnd += tp1->send_size;
4697 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4698 			if (sent) {
4699 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4700 			} else {
4701 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4702 			}
4703 			if (tp1->data) {
4704 				sctp_m_freem(tp1->data);
4705 				tp1->data = NULL;
4706 			}
4707 			do_wakeup_routine = 1;
4708 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4709 				stcb->asoc.sent_queue_cnt_removeable--;
4710 			}
4711 		}
4712 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4713 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4714 		    SCTP_DATA_NOT_FRAG) {
4715 			/* not frag'ed we ae done   */
4716 			notdone = 0;
4717 			foundeom = 1;
4718 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4719 			/* end of frag, we are done */
4720 			notdone = 0;
4721 			foundeom = 1;
4722 		} else {
4723 			/*
4724 			 * Its a begin or middle piece, we must mark all of
4725 			 * it
4726 			 */
4727 			notdone = 1;
4728 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4729 		}
4730 	} while (tp1 && notdone);
4731 	if (foundeom == 0) {
4732 		/*
4733 		 * The multi-part message was scattered across the send and
4734 		 * sent queue.
4735 		 */
4736 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4737 			if ((tp1->rec.data.sid != sid) ||
4738 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4739 				break;
4740 			}
4741 			/*
4742 			 * save to chk in case we have some on stream out
4743 			 * queue. If so and we have an un-transmitted one we
4744 			 * don't have to fudge the TSN.
4745 			 */
4746 			chk = tp1;
4747 			ret_sz += tp1->book_size;
4748 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4749 			if (sent) {
4750 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4751 			} else {
4752 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4753 			}
4754 			if (tp1->data) {
4755 				sctp_m_freem(tp1->data);
4756 				tp1->data = NULL;
4757 			}
4758 			/* No flight involved here book the size to 0 */
4759 			tp1->book_size = 0;
4760 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4761 				foundeom = 1;
4762 			}
4763 			do_wakeup_routine = 1;
4764 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4765 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4766 			/*
4767 			 * on to the sent queue so we can wait for it to be
4768 			 * passed by.
4769 			 */
4770 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4771 			    sctp_next);
4772 			stcb->asoc.send_queue_cnt--;
4773 			stcb->asoc.sent_queue_cnt++;
4774 		}
4775 	}
4776 	if (foundeom == 0) {
4777 		/*
4778 		 * Still no eom found. That means there is stuff left on the
4779 		 * stream out queue.. yuck.
4780 		 */
4781 		SCTP_TCB_SEND_LOCK(stcb);
4782 		strq = &stcb->asoc.strmout[sid];
4783 		sp = TAILQ_FIRST(&strq->outqueue);
4784 		if (sp != NULL) {
4785 			sp->discard_rest = 1;
4786 			/*
4787 			 * We may need to put a chunk on the queue that
4788 			 * holds the TSN that would have been sent with the
4789 			 * LAST bit.
4790 			 */
4791 			if (chk == NULL) {
4792 				/* Yep, we have to */
4793 				sctp_alloc_a_chunk(stcb, chk);
4794 				if (chk == NULL) {
4795 					/*
4796 					 * we are hosed. All we can do is
4797 					 * nothing.. which will cause an
4798 					 * abort if the peer is paying
4799 					 * attention.
4800 					 */
4801 					goto oh_well;
4802 				}
4803 				memset(chk, 0, sizeof(*chk));
4804 				chk->rec.data.rcv_flags = 0;
4805 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4806 				chk->asoc = &stcb->asoc;
4807 				if (stcb->asoc.idata_supported == 0) {
4808 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4809 						chk->rec.data.mid = 0;
4810 					} else {
4811 						chk->rec.data.mid = strq->next_mid_ordered;
4812 					}
4813 				} else {
4814 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4815 						chk->rec.data.mid = strq->next_mid_unordered;
4816 					} else {
4817 						chk->rec.data.mid = strq->next_mid_ordered;
4818 					}
4819 				}
4820 				chk->rec.data.sid = sp->sid;
4821 				chk->rec.data.ppid = sp->ppid;
4822 				chk->rec.data.context = sp->context;
4823 				chk->flags = sp->act_flags;
4824 				chk->whoTo = NULL;
4825 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4826 				strq->chunks_on_queues++;
4827 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4828 				stcb->asoc.sent_queue_cnt++;
4829 				stcb->asoc.pr_sctp_cnt++;
4830 			}
4831 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4832 			if (sp->sinfo_flags & SCTP_UNORDERED) {
4833 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4834 			}
4835 			if (stcb->asoc.idata_supported == 0) {
4836 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4837 					strq->next_mid_ordered++;
4838 				}
4839 			} else {
4840 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4841 					strq->next_mid_unordered++;
4842 				} else {
4843 					strq->next_mid_ordered++;
4844 				}
4845 			}
4846 	oh_well:
4847 			if (sp->data) {
4848 				/*
4849 				 * Pull any data to free up the SB and allow
4850 				 * sender to "add more" while we will throw
4851 				 * away :-)
4852 				 */
4853 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4854 				ret_sz += sp->length;
4855 				do_wakeup_routine = 1;
4856 				sp->some_taken = 1;
4857 				sctp_m_freem(sp->data);
4858 				sp->data = NULL;
4859 				sp->tail_mbuf = NULL;
4860 				sp->length = 0;
4861 			}
4862 		}
4863 		SCTP_TCB_SEND_UNLOCK(stcb);
4864 	}
4865 	if (do_wakeup_routine) {
4866 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4867 		struct socket *so;
4868 
4869 		so = SCTP_INP_SO(stcb->sctp_ep);
4870 		if (!so_locked) {
4871 			atomic_add_int(&stcb->asoc.refcnt, 1);
4872 			SCTP_TCB_UNLOCK(stcb);
4873 			SCTP_SOCKET_LOCK(so, 1);
4874 			SCTP_TCB_LOCK(stcb);
4875 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4876 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4877 				/* assoc was freed while we were unlocked */
4878 				SCTP_SOCKET_UNLOCK(so, 1);
4879 				return (ret_sz);
4880 			}
4881 		}
4882 #endif
4883 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4884 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4885 		if (!so_locked) {
4886 			SCTP_SOCKET_UNLOCK(so, 1);
4887 		}
4888 #endif
4889 	}
4890 	return (ret_sz);
4891 }
4892 
4893 /*
4894  * checks to see if the given address, sa, is one that is currently known by
4895  * the kernel note: can't distinguish the same address on multiple interfaces
4896  * and doesn't handle multiple addresses with different zone/scope id's note:
4897  * ifa_ifwithaddr() compares the entire sockaddr struct
4898  */
4899 struct sctp_ifa *
4900 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4901     int holds_lock)
4902 {
4903 	struct sctp_laddr *laddr;
4904 
4905 	if (holds_lock == 0) {
4906 		SCTP_INP_RLOCK(inp);
4907 	}
4908 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4909 		if (laddr->ifa == NULL)
4910 			continue;
4911 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4912 			continue;
4913 #ifdef INET
4914 		if (addr->sa_family == AF_INET) {
4915 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4916 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4917 				/* found him. */
4918 				if (holds_lock == 0) {
4919 					SCTP_INP_RUNLOCK(inp);
4920 				}
4921 				return (laddr->ifa);
4922 				break;
4923 			}
4924 		}
4925 #endif
4926 #ifdef INET6
4927 		if (addr->sa_family == AF_INET6) {
4928 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4929 			    &laddr->ifa->address.sin6)) {
4930 				/* found him. */
4931 				if (holds_lock == 0) {
4932 					SCTP_INP_RUNLOCK(inp);
4933 				}
4934 				return (laddr->ifa);
4935 				break;
4936 			}
4937 		}
4938 #endif
4939 	}
4940 	if (holds_lock == 0) {
4941 		SCTP_INP_RUNLOCK(inp);
4942 	}
4943 	return (NULL);
4944 }
4945 
4946 uint32_t
4947 sctp_get_ifa_hash_val(struct sockaddr *addr)
4948 {
4949 	switch (addr->sa_family) {
4950 #ifdef INET
4951 	case AF_INET:
4952 		{
4953 			struct sockaddr_in *sin;
4954 
4955 			sin = (struct sockaddr_in *)addr;
4956 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4957 		}
4958 #endif
4959 #ifdef INET6
4960 	case AF_INET6:
4961 		{
4962 			struct sockaddr_in6 *sin6;
4963 			uint32_t hash_of_addr;
4964 
4965 			sin6 = (struct sockaddr_in6 *)addr;
4966 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4967 			    sin6->sin6_addr.s6_addr32[1] +
4968 			    sin6->sin6_addr.s6_addr32[2] +
4969 			    sin6->sin6_addr.s6_addr32[3]);
4970 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4971 			return (hash_of_addr);
4972 		}
4973 #endif
4974 	default:
4975 		break;
4976 	}
4977 	return (0);
4978 }
4979 
4980 struct sctp_ifa *
4981 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4982 {
4983 	struct sctp_ifa *sctp_ifap;
4984 	struct sctp_vrf *vrf;
4985 	struct sctp_ifalist *hash_head;
4986 	uint32_t hash_of_addr;
4987 
4988 	if (holds_lock == 0)
4989 		SCTP_IPI_ADDR_RLOCK();
4990 
4991 	vrf = sctp_find_vrf(vrf_id);
4992 	if (vrf == NULL) {
4993 		if (holds_lock == 0)
4994 			SCTP_IPI_ADDR_RUNLOCK();
4995 		return (NULL);
4996 	}
4997 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4998 
4999 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5000 	if (hash_head == NULL) {
5001 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5002 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5003 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5004 		sctp_print_address(addr);
5005 		SCTP_PRINTF("No such bucket for address\n");
5006 		if (holds_lock == 0)
5007 			SCTP_IPI_ADDR_RUNLOCK();
5008 
5009 		return (NULL);
5010 	}
5011 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5012 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5013 			continue;
5014 #ifdef INET
5015 		if (addr->sa_family == AF_INET) {
5016 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5017 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5018 				/* found him. */
5019 				if (holds_lock == 0)
5020 					SCTP_IPI_ADDR_RUNLOCK();
5021 				return (sctp_ifap);
5022 				break;
5023 			}
5024 		}
5025 #endif
5026 #ifdef INET6
5027 		if (addr->sa_family == AF_INET6) {
5028 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5029 			    &sctp_ifap->address.sin6)) {
5030 				/* found him. */
5031 				if (holds_lock == 0)
5032 					SCTP_IPI_ADDR_RUNLOCK();
5033 				return (sctp_ifap);
5034 				break;
5035 			}
5036 		}
5037 #endif
5038 	}
5039 	if (holds_lock == 0)
5040 		SCTP_IPI_ADDR_RUNLOCK();
5041 	return (NULL);
5042 }
5043 
5044 static void
5045 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
5046     uint32_t rwnd_req)
5047 {
5048 	/* User pulled some data, do we need a rwnd update? */
5049 	int r_unlocked = 0;
5050 	uint32_t dif, rwnd;
5051 	struct socket *so = NULL;
5052 
5053 	if (stcb == NULL)
5054 		return;
5055 
5056 	atomic_add_int(&stcb->asoc.refcnt, 1);
5057 
5058 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5059 	    SCTP_STATE_SHUTDOWN_RECEIVED |
5060 	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5061 		/* Pre-check If we are freeing no update */
5062 		goto no_lock;
5063 	}
5064 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5065 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5066 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5067 		goto out;
5068 	}
5069 	so = stcb->sctp_socket;
5070 	if (so == NULL) {
5071 		goto out;
5072 	}
5073 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5074 	/* Have you have freed enough to look */
5075 	*freed_so_far = 0;
5076 	/* Yep, its worth a look and the lock overhead */
5077 
5078 	/* Figure out what the rwnd would be */
5079 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5080 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5081 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5082 	} else {
5083 		dif = 0;
5084 	}
5085 	if (dif >= rwnd_req) {
5086 		if (hold_rlock) {
5087 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5088 			r_unlocked = 1;
5089 		}
5090 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5091 			/*
5092 			 * One last check before we allow the guy possibly
5093 			 * to get in. There is a race, where the guy has not
5094 			 * reached the gate. In that case
5095 			 */
5096 			goto out;
5097 		}
5098 		SCTP_TCB_LOCK(stcb);
5099 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5100 			/* No reports here */
5101 			SCTP_TCB_UNLOCK(stcb);
5102 			goto out;
5103 		}
5104 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5105 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5106 
5107 		sctp_chunk_output(stcb->sctp_ep, stcb,
5108 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5109 		/* make sure no timer is running */
5110 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5111 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5112 		SCTP_TCB_UNLOCK(stcb);
5113 	} else {
5114 		/* Update how much we have pending */
5115 		stcb->freed_by_sorcv_sincelast = dif;
5116 	}
5117 out:
5118 	if (so && r_unlocked && hold_rlock) {
5119 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5120 	}
5121 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5122 no_lock:
5123 	atomic_add_int(&stcb->asoc.refcnt, -1);
5124 	return;
5125 }
5126 
5127 int
5128 sctp_sorecvmsg(struct socket *so,
5129     struct uio *uio,
5130     struct mbuf **mp,
5131     struct sockaddr *from,
5132     int fromlen,
5133     int *msg_flags,
5134     struct sctp_sndrcvinfo *sinfo,
5135     int filling_sinfo)
5136 {
5137 	/*
5138 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5139 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5140 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5141 	 * On the way out we may send out any combination of:
5142 	 * MSG_NOTIFICATION MSG_EOR
5143 	 *
5144 	 */
5145 	struct sctp_inpcb *inp = NULL;
5146 	int my_len = 0;
5147 	int cp_len = 0, error = 0;
5148 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5149 	struct mbuf *m = NULL;
5150 	struct sctp_tcb *stcb = NULL;
5151 	int wakeup_read_socket = 0;
5152 	int freecnt_applied = 0;
5153 	int out_flags = 0, in_flags = 0;
5154 	int block_allowed = 1;
5155 	uint32_t freed_so_far = 0;
5156 	uint32_t copied_so_far = 0;
5157 	int in_eeor_mode = 0;
5158 	int no_rcv_needed = 0;
5159 	uint32_t rwnd_req = 0;
5160 	int hold_sblock = 0;
5161 	int hold_rlock = 0;
5162 	ssize_t slen = 0;
5163 	uint32_t held_length = 0;
5164 	int sockbuf_lock = 0;
5165 
5166 	if (uio == NULL) {
5167 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5168 		return (EINVAL);
5169 	}
5170 	if (msg_flags) {
5171 		in_flags = *msg_flags;
5172 		if (in_flags & MSG_PEEK)
5173 			SCTP_STAT_INCR(sctps_read_peeks);
5174 	} else {
5175 		in_flags = 0;
5176 	}
5177 	slen = uio->uio_resid;
5178 
5179 	/* Pull in and set up our int flags */
5180 	if (in_flags & MSG_OOB) {
5181 		/* Out of band's NOT supported */
5182 		return (EOPNOTSUPP);
5183 	}
5184 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5185 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5186 		return (EINVAL);
5187 	}
5188 	if ((in_flags & (MSG_DONTWAIT
5189 	    | MSG_NBIO
5190 	    )) ||
5191 	    SCTP_SO_IS_NBIO(so)) {
5192 		block_allowed = 0;
5193 	}
5194 	/* setup the endpoint */
5195 	inp = (struct sctp_inpcb *)so->so_pcb;
5196 	if (inp == NULL) {
5197 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5198 		return (EFAULT);
5199 	}
5200 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5201 	/* Must be at least a MTU's worth */
5202 	if (rwnd_req < SCTP_MIN_RWND)
5203 		rwnd_req = SCTP_MIN_RWND;
5204 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5205 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5206 		sctp_misc_ints(SCTP_SORECV_ENTER,
5207 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5208 	}
5209 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5210 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5211 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5212 	}
5213 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5214 	if (error) {
5215 		goto release_unlocked;
5216 	}
5217 	sockbuf_lock = 1;
5218 restart:
5219 
5220 
5221 restart_nosblocks:
5222 	if (hold_sblock == 0) {
5223 		SOCKBUF_LOCK(&so->so_rcv);
5224 		hold_sblock = 1;
5225 	}
5226 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5227 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5228 		goto out;
5229 	}
5230 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5231 		if (so->so_error) {
5232 			error = so->so_error;
5233 			if ((in_flags & MSG_PEEK) == 0)
5234 				so->so_error = 0;
5235 			goto out;
5236 		} else {
5237 			if (so->so_rcv.sb_cc == 0) {
5238 				/* indicate EOF */
5239 				error = 0;
5240 				goto out;
5241 			}
5242 		}
5243 	}
5244 	if (so->so_rcv.sb_cc <= held_length) {
5245 		if (so->so_error) {
5246 			error = so->so_error;
5247 			if ((in_flags & MSG_PEEK) == 0) {
5248 				so->so_error = 0;
5249 			}
5250 			goto out;
5251 		}
5252 		if ((so->so_rcv.sb_cc == 0) &&
5253 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5254 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5255 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5256 				/*
5257 				 * For active open side clear flags for
5258 				 * re-use passive open is blocked by
5259 				 * connect.
5260 				 */
5261 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5262 					/*
5263 					 * You were aborted, passive side
5264 					 * always hits here
5265 					 */
5266 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5267 					error = ECONNRESET;
5268 				}
5269 				so->so_state &= ~(SS_ISCONNECTING |
5270 				    SS_ISDISCONNECTING |
5271 				    SS_ISCONFIRMING |
5272 				    SS_ISCONNECTED);
5273 				if (error == 0) {
5274 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5275 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5276 						error = ENOTCONN;
5277 					}
5278 				}
5279 				goto out;
5280 			}
5281 		}
5282 		if (block_allowed) {
5283 			error = sbwait(&so->so_rcv);
5284 			if (error) {
5285 				goto out;
5286 			}
5287 			held_length = 0;
5288 			goto restart_nosblocks;
5289 		} else {
5290 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5291 			error = EWOULDBLOCK;
5292 			goto out;
5293 		}
5294 	}
5295 	if (hold_sblock == 1) {
5296 		SOCKBUF_UNLOCK(&so->so_rcv);
5297 		hold_sblock = 0;
5298 	}
5299 	/* we possibly have data we can read */
5300 	/* sa_ignore FREED_MEMORY */
5301 	control = TAILQ_FIRST(&inp->read_queue);
5302 	if (control == NULL) {
5303 		/*
5304 		 * This could be happening since the appender did the
5305 		 * increment but as not yet did the tailq insert onto the
5306 		 * read_queue
5307 		 */
5308 		if (hold_rlock == 0) {
5309 			SCTP_INP_READ_LOCK(inp);
5310 		}
5311 		control = TAILQ_FIRST(&inp->read_queue);
5312 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5313 #ifdef INVARIANTS
5314 			panic("Huh, its non zero and nothing on control?");
5315 #endif
5316 			so->so_rcv.sb_cc = 0;
5317 		}
5318 		SCTP_INP_READ_UNLOCK(inp);
5319 		hold_rlock = 0;
5320 		goto restart;
5321 	}
5322 	if ((control->length == 0) &&
5323 	    (control->do_not_ref_stcb)) {
5324 		/*
5325 		 * Clean up code for freeing assoc that left behind a
5326 		 * pdapi.. maybe a peer in EEOR that just closed after
5327 		 * sending and never indicated a EOR.
5328 		 */
5329 		if (hold_rlock == 0) {
5330 			hold_rlock = 1;
5331 			SCTP_INP_READ_LOCK(inp);
5332 		}
5333 		control->held_length = 0;
5334 		if (control->data) {
5335 			/* Hmm there is data here .. fix */
5336 			struct mbuf *m_tmp;
5337 			int cnt = 0;
5338 
5339 			m_tmp = control->data;
5340 			while (m_tmp) {
5341 				cnt += SCTP_BUF_LEN(m_tmp);
5342 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5343 					control->tail_mbuf = m_tmp;
5344 					control->end_added = 1;
5345 				}
5346 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5347 			}
5348 			control->length = cnt;
5349 		} else {
5350 			/* remove it */
5351 			TAILQ_REMOVE(&inp->read_queue, control, next);
5352 			/* Add back any hiddend data */
5353 			sctp_free_remote_addr(control->whoFrom);
5354 			sctp_free_a_readq(stcb, control);
5355 		}
5356 		if (hold_rlock) {
5357 			hold_rlock = 0;
5358 			SCTP_INP_READ_UNLOCK(inp);
5359 		}
5360 		goto restart;
5361 	}
5362 	if ((control->length == 0) &&
5363 	    (control->end_added == 1)) {
5364 		/*
5365 		 * Do we also need to check for (control->pdapi_aborted ==
5366 		 * 1)?
5367 		 */
5368 		if (hold_rlock == 0) {
5369 			hold_rlock = 1;
5370 			SCTP_INP_READ_LOCK(inp);
5371 		}
5372 		TAILQ_REMOVE(&inp->read_queue, control, next);
5373 		if (control->data) {
5374 #ifdef INVARIANTS
5375 			panic("control->data not null but control->length == 0");
5376 #else
5377 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5378 			sctp_m_freem(control->data);
5379 			control->data = NULL;
5380 #endif
5381 		}
5382 		if (control->aux_data) {
5383 			sctp_m_free(control->aux_data);
5384 			control->aux_data = NULL;
5385 		}
5386 #ifdef INVARIANTS
5387 		if (control->on_strm_q) {
5388 			panic("About to free ctl:%p so:%p and its in %d",
5389 			    control, so, control->on_strm_q);
5390 		}
5391 #endif
5392 		sctp_free_remote_addr(control->whoFrom);
5393 		sctp_free_a_readq(stcb, control);
5394 		if (hold_rlock) {
5395 			hold_rlock = 0;
5396 			SCTP_INP_READ_UNLOCK(inp);
5397 		}
5398 		goto restart;
5399 	}
5400 	if (control->length == 0) {
5401 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5402 		    (filling_sinfo)) {
5403 			/* find a more suitable one then this */
5404 			ctl = TAILQ_NEXT(control, next);
5405 			while (ctl) {
5406 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5407 				    (ctl->some_taken ||
5408 				    (ctl->spec_flags & M_NOTIFICATION) ||
5409 				    ((ctl->do_not_ref_stcb == 0) &&
5410 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5411 				    ) {
5412 					/*-
5413 					 * If we have a different TCB next, and there is data
5414 					 * present. If we have already taken some (pdapi), OR we can
5415 					 * ref the tcb and no delivery as started on this stream, we
5416 					 * take it. Note we allow a notification on a different
5417 					 * assoc to be delivered..
5418 					 */
5419 					control = ctl;
5420 					goto found_one;
5421 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5422 					    (ctl->length) &&
5423 					    ((ctl->some_taken) ||
5424 					    ((ctl->do_not_ref_stcb == 0) &&
5425 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5426 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5427 					/*-
5428 					 * If we have the same tcb, and there is data present, and we
5429 					 * have the strm interleave feature present. Then if we have
5430 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5431 					 * not started a delivery for this stream, we can take it.
5432 					 * Note we do NOT allow a notificaiton on the same assoc to
5433 					 * be delivered.
5434 					 */
5435 					control = ctl;
5436 					goto found_one;
5437 				}
5438 				ctl = TAILQ_NEXT(ctl, next);
5439 			}
5440 		}
5441 		/*
5442 		 * if we reach here, not suitable replacement is available
5443 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5444 		 * into the our held count, and its time to sleep again.
5445 		 */
5446 		held_length = so->so_rcv.sb_cc;
5447 		control->held_length = so->so_rcv.sb_cc;
5448 		goto restart;
5449 	}
5450 	/* Clear the held length since there is something to read */
5451 	control->held_length = 0;
5452 found_one:
5453 	/*
5454 	 * If we reach here, control has a some data for us to read off.
5455 	 * Note that stcb COULD be NULL.
5456 	 */
5457 	if (hold_rlock == 0) {
5458 		hold_rlock = 1;
5459 		SCTP_INP_READ_LOCK(inp);
5460 	}
5461 	control->some_taken++;
5462 	stcb = control->stcb;
5463 	if (stcb) {
5464 		if ((control->do_not_ref_stcb == 0) &&
5465 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5466 			if (freecnt_applied == 0)
5467 				stcb = NULL;
5468 		} else if (control->do_not_ref_stcb == 0) {
5469 			/* you can't free it on me please */
5470 			/*
5471 			 * The lock on the socket buffer protects us so the
5472 			 * free code will stop. But since we used the
5473 			 * socketbuf lock and the sender uses the tcb_lock
5474 			 * to increment, we need to use the atomic add to
5475 			 * the refcnt
5476 			 */
5477 			if (freecnt_applied) {
5478 #ifdef INVARIANTS
5479 				panic("refcnt already incremented");
5480 #else
5481 				SCTP_PRINTF("refcnt already incremented?\n");
5482 #endif
5483 			} else {
5484 				atomic_add_int(&stcb->asoc.refcnt, 1);
5485 				freecnt_applied = 1;
5486 			}
5487 			/*
5488 			 * Setup to remember how much we have not yet told
5489 			 * the peer our rwnd has opened up. Note we grab the
5490 			 * value from the tcb from last time. Note too that
5491 			 * sack sending clears this when a sack is sent,
5492 			 * which is fine. Once we hit the rwnd_req, we then
5493 			 * will go to the sctp_user_rcvd() that will not
5494 			 * lock until it KNOWs it MUST send a WUP-SACK.
5495 			 */
5496 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5497 			stcb->freed_by_sorcv_sincelast = 0;
5498 		}
5499 	}
5500 	if (stcb &&
5501 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5502 	    control->do_not_ref_stcb == 0) {
5503 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5504 	}
5505 	/* First lets get off the sinfo and sockaddr info */
5506 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5507 		sinfo->sinfo_stream = control->sinfo_stream;
5508 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5509 		sinfo->sinfo_flags = control->sinfo_flags;
5510 		sinfo->sinfo_ppid = control->sinfo_ppid;
5511 		sinfo->sinfo_context = control->sinfo_context;
5512 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5513 		sinfo->sinfo_tsn = control->sinfo_tsn;
5514 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5515 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5516 		nxt = TAILQ_NEXT(control, next);
5517 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5518 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5519 			struct sctp_extrcvinfo *s_extra;
5520 
5521 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5522 			if ((nxt) &&
5523 			    (nxt->length)) {
5524 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5525 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5526 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5527 				}
5528 				if (nxt->spec_flags & M_NOTIFICATION) {
5529 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5530 				}
5531 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5532 				s_extra->serinfo_next_length = nxt->length;
5533 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5534 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5535 				if (nxt->tail_mbuf != NULL) {
5536 					if (nxt->end_added) {
5537 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5538 					}
5539 				}
5540 			} else {
5541 				/*
5542 				 * we explicitly 0 this, since the memcpy
5543 				 * got some other things beyond the older
5544 				 * sinfo_ that is on the control's structure
5545 				 * :-D
5546 				 */
5547 				nxt = NULL;
5548 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5549 				s_extra->serinfo_next_aid = 0;
5550 				s_extra->serinfo_next_length = 0;
5551 				s_extra->serinfo_next_ppid = 0;
5552 				s_extra->serinfo_next_stream = 0;
5553 			}
5554 		}
5555 		/*
5556 		 * update off the real current cum-ack, if we have an stcb.
5557 		 */
5558 		if ((control->do_not_ref_stcb == 0) && stcb)
5559 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5560 		/*
5561 		 * mask off the high bits, we keep the actual chunk bits in
5562 		 * there.
5563 		 */
5564 		sinfo->sinfo_flags &= 0x00ff;
5565 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5566 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5567 		}
5568 	}
5569 #ifdef SCTP_ASOCLOG_OF_TSNS
5570 	{
5571 		int index, newindex;
5572 		struct sctp_pcbtsn_rlog *entry;
5573 
5574 		do {
5575 			index = inp->readlog_index;
5576 			newindex = index + 1;
5577 			if (newindex >= SCTP_READ_LOG_SIZE) {
5578 				newindex = 0;
5579 			}
5580 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5581 		entry = &inp->readlog[index];
5582 		entry->vtag = control->sinfo_assoc_id;
5583 		entry->strm = control->sinfo_stream;
5584 		entry->seq = (uint16_t)control->mid;
5585 		entry->sz = control->length;
5586 		entry->flgs = control->sinfo_flags;
5587 	}
5588 #endif
5589 	if ((fromlen > 0) && (from != NULL)) {
5590 		union sctp_sockstore store;
5591 		size_t len;
5592 
5593 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5594 #ifdef INET6
5595 		case AF_INET6:
5596 			len = sizeof(struct sockaddr_in6);
5597 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5598 			store.sin6.sin6_port = control->port_from;
5599 			break;
5600 #endif
5601 #ifdef INET
5602 		case AF_INET:
5603 #ifdef INET6
5604 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5605 				len = sizeof(struct sockaddr_in6);
5606 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5607 				    &store.sin6);
5608 				store.sin6.sin6_port = control->port_from;
5609 			} else {
5610 				len = sizeof(struct sockaddr_in);
5611 				store.sin = control->whoFrom->ro._l_addr.sin;
5612 				store.sin.sin_port = control->port_from;
5613 			}
5614 #else
5615 			len = sizeof(struct sockaddr_in);
5616 			store.sin = control->whoFrom->ro._l_addr.sin;
5617 			store.sin.sin_port = control->port_from;
5618 #endif
5619 			break;
5620 #endif
5621 		default:
5622 			len = 0;
5623 			break;
5624 		}
5625 		memcpy(from, &store, min((size_t)fromlen, len));
5626 #ifdef INET6
5627 		{
5628 			struct sockaddr_in6 lsa6, *from6;
5629 
5630 			from6 = (struct sockaddr_in6 *)from;
5631 			sctp_recover_scope_mac(from6, (&lsa6));
5632 		}
5633 #endif
5634 	}
5635 	if (hold_rlock) {
5636 		SCTP_INP_READ_UNLOCK(inp);
5637 		hold_rlock = 0;
5638 	}
5639 	if (hold_sblock) {
5640 		SOCKBUF_UNLOCK(&so->so_rcv);
5641 		hold_sblock = 0;
5642 	}
5643 	/* now copy out what data we can */
5644 	if (mp == NULL) {
5645 		/* copy out each mbuf in the chain up to length */
5646 get_more_data:
5647 		m = control->data;
5648 		while (m) {
5649 			/* Move out all we can */
5650 			cp_len = (int)uio->uio_resid;
5651 			my_len = (int)SCTP_BUF_LEN(m);
5652 			if (cp_len > my_len) {
5653 				/* not enough in this buf */
5654 				cp_len = my_len;
5655 			}
5656 			if (hold_rlock) {
5657 				SCTP_INP_READ_UNLOCK(inp);
5658 				hold_rlock = 0;
5659 			}
5660 			if (cp_len > 0)
5661 				error = uiomove(mtod(m, char *), cp_len, uio);
5662 			/* re-read */
5663 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5664 				goto release;
5665 			}
5666 			if ((control->do_not_ref_stcb == 0) && stcb &&
5667 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5668 				no_rcv_needed = 1;
5669 			}
5670 			if (error) {
5671 				/* error we are out of here */
5672 				goto release;
5673 			}
5674 			SCTP_INP_READ_LOCK(inp);
5675 			hold_rlock = 1;
5676 			if (cp_len == SCTP_BUF_LEN(m)) {
5677 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5678 				    (control->end_added)) {
5679 					out_flags |= MSG_EOR;
5680 					if ((control->do_not_ref_stcb == 0) &&
5681 					    (control->stcb != NULL) &&
5682 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5683 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5684 				}
5685 				if (control->spec_flags & M_NOTIFICATION) {
5686 					out_flags |= MSG_NOTIFICATION;
5687 				}
5688 				/* we ate up the mbuf */
5689 				if (in_flags & MSG_PEEK) {
5690 					/* just looking */
5691 					m = SCTP_BUF_NEXT(m);
5692 					copied_so_far += cp_len;
5693 				} else {
5694 					/* dispose of the mbuf */
5695 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5696 						sctp_sblog(&so->so_rcv,
5697 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5698 					}
5699 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5700 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5701 						sctp_sblog(&so->so_rcv,
5702 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5703 					}
5704 					copied_so_far += cp_len;
5705 					freed_so_far += cp_len;
5706 					freed_so_far += MSIZE;
5707 					atomic_subtract_int(&control->length, cp_len);
5708 					control->data = sctp_m_free(m);
5709 					m = control->data;
5710 					/*
5711 					 * been through it all, must hold sb
5712 					 * lock ok to null tail
5713 					 */
5714 					if (control->data == NULL) {
5715 #ifdef INVARIANTS
5716 						if ((control->end_added == 0) ||
5717 						    (TAILQ_NEXT(control, next) == NULL)) {
5718 							/*
5719 							 * If the end is not
5720 							 * added, OR the
5721 							 * next is NOT null
5722 							 * we MUST have the
5723 							 * lock.
5724 							 */
5725 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5726 								panic("Hmm we don't own the lock?");
5727 							}
5728 						}
5729 #endif
5730 						control->tail_mbuf = NULL;
5731 #ifdef INVARIANTS
5732 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5733 							panic("end_added, nothing left and no MSG_EOR");
5734 						}
5735 #endif
5736 					}
5737 				}
5738 			} else {
5739 				/* Do we need to trim the mbuf? */
5740 				if (control->spec_flags & M_NOTIFICATION) {
5741 					out_flags |= MSG_NOTIFICATION;
5742 				}
5743 				if ((in_flags & MSG_PEEK) == 0) {
5744 					SCTP_BUF_RESV_UF(m, cp_len);
5745 					SCTP_BUF_LEN(m) -= cp_len;
5746 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5747 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5748 					}
5749 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5750 					if ((control->do_not_ref_stcb == 0) &&
5751 					    stcb) {
5752 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5753 					}
5754 					copied_so_far += cp_len;
5755 					freed_so_far += cp_len;
5756 					freed_so_far += MSIZE;
5757 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5758 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5759 						    SCTP_LOG_SBRESULT, 0);
5760 					}
5761 					atomic_subtract_int(&control->length, cp_len);
5762 				} else {
5763 					copied_so_far += cp_len;
5764 				}
5765 			}
5766 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5767 				break;
5768 			}
5769 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5770 			    (control->do_not_ref_stcb == 0) &&
5771 			    (freed_so_far >= rwnd_req)) {
5772 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5773 			}
5774 		}		/* end while(m) */
5775 		/*
5776 		 * At this point we have looked at it all and we either have
5777 		 * a MSG_EOR/or read all the user wants... <OR>
5778 		 * control->length == 0.
5779 		 */
5780 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5781 			/* we are done with this control */
5782 			if (control->length == 0) {
5783 				if (control->data) {
5784 #ifdef INVARIANTS
5785 					panic("control->data not null at read eor?");
5786 #else
5787 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5788 					sctp_m_freem(control->data);
5789 					control->data = NULL;
5790 #endif
5791 				}
5792 		done_with_control:
5793 				if (hold_rlock == 0) {
5794 					SCTP_INP_READ_LOCK(inp);
5795 					hold_rlock = 1;
5796 				}
5797 				TAILQ_REMOVE(&inp->read_queue, control, next);
5798 				/* Add back any hiddend data */
5799 				if (control->held_length) {
5800 					held_length = 0;
5801 					control->held_length = 0;
5802 					wakeup_read_socket = 1;
5803 				}
5804 				if (control->aux_data) {
5805 					sctp_m_free(control->aux_data);
5806 					control->aux_data = NULL;
5807 				}
5808 				no_rcv_needed = control->do_not_ref_stcb;
5809 				sctp_free_remote_addr(control->whoFrom);
5810 				control->data = NULL;
5811 #ifdef INVARIANTS
5812 				if (control->on_strm_q) {
5813 					panic("About to free ctl:%p so:%p and its in %d",
5814 					    control, so, control->on_strm_q);
5815 				}
5816 #endif
5817 				sctp_free_a_readq(stcb, control);
5818 				control = NULL;
5819 				if ((freed_so_far >= rwnd_req) &&
5820 				    (no_rcv_needed == 0))
5821 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5822 
5823 			} else {
5824 				/*
5825 				 * The user did not read all of this
5826 				 * message, turn off the returned MSG_EOR
5827 				 * since we are leaving more behind on the
5828 				 * control to read.
5829 				 */
5830 #ifdef INVARIANTS
5831 				if (control->end_added &&
5832 				    (control->data == NULL) &&
5833 				    (control->tail_mbuf == NULL)) {
5834 					panic("Gak, control->length is corrupt?");
5835 				}
5836 #endif
5837 				no_rcv_needed = control->do_not_ref_stcb;
5838 				out_flags &= ~MSG_EOR;
5839 			}
5840 		}
5841 		if (out_flags & MSG_EOR) {
5842 			goto release;
5843 		}
5844 		if ((uio->uio_resid == 0) ||
5845 		    ((in_eeor_mode) &&
5846 		    (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
5847 			goto release;
5848 		}
5849 		/*
5850 		 * If I hit here the receiver wants more and this message is
5851 		 * NOT done (pd-api). So two questions. Can we block? if not
5852 		 * we are done. Did the user NOT set MSG_WAITALL?
5853 		 */
5854 		if (block_allowed == 0) {
5855 			goto release;
5856 		}
5857 		/*
5858 		 * We need to wait for more data a few things: - We don't
5859 		 * sbunlock() so we don't get someone else reading. - We
5860 		 * must be sure to account for the case where what is added
5861 		 * is NOT to our control when we wakeup.
5862 		 */
5863 
5864 		/*
5865 		 * Do we need to tell the transport a rwnd update might be
5866 		 * needed before we go to sleep?
5867 		 */
5868 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5869 		    ((freed_so_far >= rwnd_req) &&
5870 		    (control->do_not_ref_stcb == 0) &&
5871 		    (no_rcv_needed == 0))) {
5872 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5873 		}
5874 wait_some_more:
5875 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5876 			goto release;
5877 		}
5878 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5879 			goto release;
5880 
5881 		if (hold_rlock == 1) {
5882 			SCTP_INP_READ_UNLOCK(inp);
5883 			hold_rlock = 0;
5884 		}
5885 		if (hold_sblock == 0) {
5886 			SOCKBUF_LOCK(&so->so_rcv);
5887 			hold_sblock = 1;
5888 		}
5889 		if ((copied_so_far) && (control->length == 0) &&
5890 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5891 			goto release;
5892 		}
5893 		if (so->so_rcv.sb_cc <= control->held_length) {
5894 			error = sbwait(&so->so_rcv);
5895 			if (error) {
5896 				goto release;
5897 			}
5898 			control->held_length = 0;
5899 		}
5900 		if (hold_sblock) {
5901 			SOCKBUF_UNLOCK(&so->so_rcv);
5902 			hold_sblock = 0;
5903 		}
5904 		if (control->length == 0) {
5905 			/* still nothing here */
5906 			if (control->end_added == 1) {
5907 				/* he aborted, or is done i.e.did a shutdown */
5908 				out_flags |= MSG_EOR;
5909 				if (control->pdapi_aborted) {
5910 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5911 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5912 
5913 					out_flags |= MSG_TRUNC;
5914 				} else {
5915 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5916 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5917 				}
5918 				goto done_with_control;
5919 			}
5920 			if (so->so_rcv.sb_cc > held_length) {
5921 				control->held_length = so->so_rcv.sb_cc;
5922 				held_length = 0;
5923 			}
5924 			goto wait_some_more;
5925 		} else if (control->data == NULL) {
5926 			/*
5927 			 * we must re-sync since data is probably being
5928 			 * added
5929 			 */
5930 			SCTP_INP_READ_LOCK(inp);
5931 			if ((control->length > 0) && (control->data == NULL)) {
5932 				/*
5933 				 * big trouble.. we have the lock and its
5934 				 * corrupt?
5935 				 */
5936 #ifdef INVARIANTS
5937 				panic("Impossible data==NULL length !=0");
5938 #endif
5939 				out_flags |= MSG_EOR;
5940 				out_flags |= MSG_TRUNC;
5941 				control->length = 0;
5942 				SCTP_INP_READ_UNLOCK(inp);
5943 				goto done_with_control;
5944 			}
5945 			SCTP_INP_READ_UNLOCK(inp);
5946 			/* We will fall around to get more data */
5947 		}
5948 		goto get_more_data;
5949 	} else {
5950 		/*-
5951 		 * Give caller back the mbuf chain,
5952 		 * store in uio_resid the length
5953 		 */
5954 		wakeup_read_socket = 0;
5955 		if ((control->end_added == 0) ||
5956 		    (TAILQ_NEXT(control, next) == NULL)) {
5957 			/* Need to get rlock */
5958 			if (hold_rlock == 0) {
5959 				SCTP_INP_READ_LOCK(inp);
5960 				hold_rlock = 1;
5961 			}
5962 		}
5963 		if (control->end_added) {
5964 			out_flags |= MSG_EOR;
5965 			if ((control->do_not_ref_stcb == 0) &&
5966 			    (control->stcb != NULL) &&
5967 			    ((control->spec_flags & M_NOTIFICATION) == 0))
5968 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5969 		}
5970 		if (control->spec_flags & M_NOTIFICATION) {
5971 			out_flags |= MSG_NOTIFICATION;
5972 		}
5973 		uio->uio_resid = control->length;
5974 		*mp = control->data;
5975 		m = control->data;
5976 		while (m) {
5977 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5978 				sctp_sblog(&so->so_rcv,
5979 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5980 			}
5981 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5982 			freed_so_far += SCTP_BUF_LEN(m);
5983 			freed_so_far += MSIZE;
5984 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5985 				sctp_sblog(&so->so_rcv,
5986 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5987 			}
5988 			m = SCTP_BUF_NEXT(m);
5989 		}
5990 		control->data = control->tail_mbuf = NULL;
5991 		control->length = 0;
5992 		if (out_flags & MSG_EOR) {
5993 			/* Done with this control */
5994 			goto done_with_control;
5995 		}
5996 	}
5997 release:
5998 	if (hold_rlock == 1) {
5999 		SCTP_INP_READ_UNLOCK(inp);
6000 		hold_rlock = 0;
6001 	}
6002 	if (hold_sblock == 1) {
6003 		SOCKBUF_UNLOCK(&so->so_rcv);
6004 		hold_sblock = 0;
6005 	}
6006 	sbunlock(&so->so_rcv);
6007 	sockbuf_lock = 0;
6008 
6009 release_unlocked:
6010 	if (hold_sblock) {
6011 		SOCKBUF_UNLOCK(&so->so_rcv);
6012 		hold_sblock = 0;
6013 	}
6014 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6015 		if ((freed_so_far >= rwnd_req) &&
6016 		    (control && (control->do_not_ref_stcb == 0)) &&
6017 		    (no_rcv_needed == 0))
6018 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6019 	}
6020 out:
6021 	if (msg_flags) {
6022 		*msg_flags = out_flags;
6023 	}
6024 	if (((out_flags & MSG_EOR) == 0) &&
6025 	    ((in_flags & MSG_PEEK) == 0) &&
6026 	    (sinfo) &&
6027 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6028 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6029 		struct sctp_extrcvinfo *s_extra;
6030 
6031 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6032 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6033 	}
6034 	if (hold_rlock == 1) {
6035 		SCTP_INP_READ_UNLOCK(inp);
6036 	}
6037 	if (hold_sblock) {
6038 		SOCKBUF_UNLOCK(&so->so_rcv);
6039 	}
6040 	if (sockbuf_lock) {
6041 		sbunlock(&so->so_rcv);
6042 	}
6043 	if (freecnt_applied) {
6044 		/*
6045 		 * The lock on the socket buffer protects us so the free
6046 		 * code will stop. But since we used the socketbuf lock and
6047 		 * the sender uses the tcb_lock to increment, we need to use
6048 		 * the atomic add to the refcnt.
6049 		 */
6050 		if (stcb == NULL) {
6051 #ifdef INVARIANTS
6052 			panic("stcb for refcnt has gone NULL?");
6053 			goto stage_left;
6054 #else
6055 			goto stage_left;
6056 #endif
6057 		}
6058 		/* Save the value back for next time */
6059 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6060 		atomic_add_int(&stcb->asoc.refcnt, -1);
6061 	}
6062 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6063 		if (stcb) {
6064 			sctp_misc_ints(SCTP_SORECV_DONE,
6065 			    freed_so_far,
6066 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6067 			    stcb->asoc.my_rwnd,
6068 			    so->so_rcv.sb_cc);
6069 		} else {
6070 			sctp_misc_ints(SCTP_SORECV_DONE,
6071 			    freed_so_far,
6072 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6073 			    0,
6074 			    so->so_rcv.sb_cc);
6075 		}
6076 	}
6077 stage_left:
6078 	if (wakeup_read_socket) {
6079 		sctp_sorwakeup(inp, so);
6080 	}
6081 	return (error);
6082 }
6083 
6084 
6085 #ifdef SCTP_MBUF_LOGGING
6086 struct mbuf *
6087 sctp_m_free(struct mbuf *m)
6088 {
6089 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6090 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6091 	}
6092 	return (m_free(m));
6093 }
6094 
6095 void
6096 sctp_m_freem(struct mbuf *mb)
6097 {
6098 	while (mb != NULL)
6099 		mb = sctp_m_free(mb);
6100 }
6101 
6102 #endif
6103 
6104 int
6105 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6106 {
6107 	/*
6108 	 * Given a local address. For all associations that holds the
6109 	 * address, request a peer-set-primary.
6110 	 */
6111 	struct sctp_ifa *ifa;
6112 	struct sctp_laddr *wi;
6113 
6114 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6115 	if (ifa == NULL) {
6116 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6117 		return (EADDRNOTAVAIL);
6118 	}
6119 	/*
6120 	 * Now that we have the ifa we must awaken the iterator with this
6121 	 * message.
6122 	 */
6123 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6124 	if (wi == NULL) {
6125 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6126 		return (ENOMEM);
6127 	}
6128 	/* Now incr the count and int wi structure */
6129 	SCTP_INCR_LADDR_COUNT();
6130 	memset(wi, 0, sizeof(*wi));
6131 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6132 	wi->ifa = ifa;
6133 	wi->action = SCTP_SET_PRIM_ADDR;
6134 	atomic_add_int(&ifa->refcount, 1);
6135 
6136 	/* Now add it to the work queue */
6137 	SCTP_WQ_ADDR_LOCK();
6138 	/*
6139 	 * Should this really be a tailq? As it is we will process the
6140 	 * newest first :-0
6141 	 */
6142 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6143 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6144 	    (struct sctp_inpcb *)NULL,
6145 	    (struct sctp_tcb *)NULL,
6146 	    (struct sctp_nets *)NULL);
6147 	SCTP_WQ_ADDR_UNLOCK();
6148 	return (0);
6149 }
6150 
6151 
6152 int
6153 sctp_soreceive(struct socket *so,
6154     struct sockaddr **psa,
6155     struct uio *uio,
6156     struct mbuf **mp0,
6157     struct mbuf **controlp,
6158     int *flagsp)
6159 {
6160 	int error, fromlen;
6161 	uint8_t sockbuf[256];
6162 	struct sockaddr *from;
6163 	struct sctp_extrcvinfo sinfo;
6164 	int filling_sinfo = 1;
6165 	struct sctp_inpcb *inp;
6166 
6167 	inp = (struct sctp_inpcb *)so->so_pcb;
6168 	/* pickup the assoc we are reading from */
6169 	if (inp == NULL) {
6170 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6171 		return (EINVAL);
6172 	}
6173 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6174 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6175 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6176 	    (controlp == NULL)) {
6177 		/* user does not want the sndrcv ctl */
6178 		filling_sinfo = 0;
6179 	}
6180 	if (psa) {
6181 		from = (struct sockaddr *)sockbuf;
6182 		fromlen = sizeof(sockbuf);
6183 		from->sa_len = 0;
6184 	} else {
6185 		from = NULL;
6186 		fromlen = 0;
6187 	}
6188 
6189 	if (filling_sinfo) {
6190 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6191 	}
6192 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6193 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6194 	if (controlp != NULL) {
6195 		/* copy back the sinfo in a CMSG format */
6196 		if (filling_sinfo)
6197 			*controlp = sctp_build_ctl_nchunk(inp,
6198 			    (struct sctp_sndrcvinfo *)&sinfo);
6199 		else
6200 			*controlp = NULL;
6201 	}
6202 	if (psa) {
6203 		/* copy back the address info */
6204 		if (from && from->sa_len) {
6205 			*psa = sodupsockaddr(from, M_NOWAIT);
6206 		} else {
6207 			*psa = NULL;
6208 		}
6209 	}
6210 	return (error);
6211 }
6212 
6213 
6214 
6215 
6216 
6217 int
6218 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6219     int totaddr, int *error)
6220 {
6221 	int added = 0;
6222 	int i;
6223 	struct sctp_inpcb *inp;
6224 	struct sockaddr *sa;
6225 	size_t incr = 0;
6226 #ifdef INET
6227 	struct sockaddr_in *sin;
6228 #endif
6229 #ifdef INET6
6230 	struct sockaddr_in6 *sin6;
6231 #endif
6232 
6233 	sa = addr;
6234 	inp = stcb->sctp_ep;
6235 	*error = 0;
6236 	for (i = 0; i < totaddr; i++) {
6237 		switch (sa->sa_family) {
6238 #ifdef INET
6239 		case AF_INET:
6240 			incr = sizeof(struct sockaddr_in);
6241 			sin = (struct sockaddr_in *)sa;
6242 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6243 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6244 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6245 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6246 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6247 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6248 				*error = EINVAL;
6249 				goto out_now;
6250 			}
6251 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6252 			    SCTP_DONOT_SETSCOPE,
6253 			    SCTP_ADDR_IS_CONFIRMED)) {
6254 				/* assoc gone no un-lock */
6255 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6256 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6257 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6258 				*error = ENOBUFS;
6259 				goto out_now;
6260 			}
6261 			added++;
6262 			break;
6263 #endif
6264 #ifdef INET6
6265 		case AF_INET6:
6266 			incr = sizeof(struct sockaddr_in6);
6267 			sin6 = (struct sockaddr_in6 *)sa;
6268 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6269 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6270 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6271 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6272 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6273 				*error = EINVAL;
6274 				goto out_now;
6275 			}
6276 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6277 			    SCTP_DONOT_SETSCOPE,
6278 			    SCTP_ADDR_IS_CONFIRMED)) {
6279 				/* assoc gone no un-lock */
6280 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6281 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6282 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6283 				*error = ENOBUFS;
6284 				goto out_now;
6285 			}
6286 			added++;
6287 			break;
6288 #endif
6289 		default:
6290 			break;
6291 		}
6292 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6293 	}
6294 out_now:
6295 	return (added);
6296 }
6297 
6298 struct sctp_tcb *
6299 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6300     unsigned int *totaddr,
6301     unsigned int *num_v4, unsigned int *num_v6, int *error,
6302     unsigned int limit, int *bad_addr)
6303 {
6304 	struct sockaddr *sa;
6305 	struct sctp_tcb *stcb = NULL;
6306 	unsigned int incr, at, i;
6307 
6308 	at = 0;
6309 	sa = addr;
6310 	*error = *num_v6 = *num_v4 = 0;
6311 	/* account and validate addresses */
6312 	for (i = 0; i < *totaddr; i++) {
6313 		switch (sa->sa_family) {
6314 #ifdef INET
6315 		case AF_INET:
6316 			incr = (unsigned int)sizeof(struct sockaddr_in);
6317 			if (sa->sa_len != incr) {
6318 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6319 				*error = EINVAL;
6320 				*bad_addr = 1;
6321 				return (NULL);
6322 			}
6323 			(*num_v4) += 1;
6324 			break;
6325 #endif
6326 #ifdef INET6
6327 		case AF_INET6:
6328 			{
6329 				struct sockaddr_in6 *sin6;
6330 
6331 				sin6 = (struct sockaddr_in6 *)sa;
6332 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6333 					/* Must be non-mapped for connectx */
6334 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6335 					*error = EINVAL;
6336 					*bad_addr = 1;
6337 					return (NULL);
6338 				}
6339 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6340 				if (sa->sa_len != incr) {
6341 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6342 					*error = EINVAL;
6343 					*bad_addr = 1;
6344 					return (NULL);
6345 				}
6346 				(*num_v6) += 1;
6347 				break;
6348 			}
6349 #endif
6350 		default:
6351 			*totaddr = i;
6352 			incr = 0;
6353 			/* we are done */
6354 			break;
6355 		}
6356 		if (i == *totaddr) {
6357 			break;
6358 		}
6359 		SCTP_INP_INCR_REF(inp);
6360 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6361 		if (stcb != NULL) {
6362 			/* Already have or am bring up an association */
6363 			return (stcb);
6364 		} else {
6365 			SCTP_INP_DECR_REF(inp);
6366 		}
6367 		if ((at + incr) > limit) {
6368 			*totaddr = i;
6369 			break;
6370 		}
6371 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6372 	}
6373 	return ((struct sctp_tcb *)NULL);
6374 }
6375 
6376 /*
6377  * sctp_bindx(ADD) for one address.
6378  * assumes all arguments are valid/checked by caller.
6379  */
6380 void
6381 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6382     struct sockaddr *sa, sctp_assoc_t assoc_id,
6383     uint32_t vrf_id, int *error, void *p)
6384 {
6385 	struct sockaddr *addr_touse;
6386 #if defined(INET) && defined(INET6)
6387 	struct sockaddr_in sin;
6388 #endif
6389 
6390 	/* see if we're bound all already! */
6391 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6392 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6393 		*error = EINVAL;
6394 		return;
6395 	}
6396 	addr_touse = sa;
6397 #ifdef INET6
6398 	if (sa->sa_family == AF_INET6) {
6399 #ifdef INET
6400 		struct sockaddr_in6 *sin6;
6401 
6402 #endif
6403 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6404 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6405 			*error = EINVAL;
6406 			return;
6407 		}
6408 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6409 			/* can only bind v6 on PF_INET6 sockets */
6410 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6411 			*error = EINVAL;
6412 			return;
6413 		}
6414 #ifdef INET
6415 		sin6 = (struct sockaddr_in6 *)addr_touse;
6416 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6417 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6418 			    SCTP_IPV6_V6ONLY(inp)) {
6419 				/* can't bind v4-mapped on PF_INET sockets */
6420 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6421 				*error = EINVAL;
6422 				return;
6423 			}
6424 			in6_sin6_2_sin(&sin, sin6);
6425 			addr_touse = (struct sockaddr *)&sin;
6426 		}
6427 #endif
6428 	}
6429 #endif
6430 #ifdef INET
6431 	if (sa->sa_family == AF_INET) {
6432 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6433 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6434 			*error = EINVAL;
6435 			return;
6436 		}
6437 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6438 		    SCTP_IPV6_V6ONLY(inp)) {
6439 			/* can't bind v4 on PF_INET sockets */
6440 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6441 			*error = EINVAL;
6442 			return;
6443 		}
6444 	}
6445 #endif
6446 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6447 		if (p == NULL) {
6448 			/* Can't get proc for Net/Open BSD */
6449 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6450 			*error = EINVAL;
6451 			return;
6452 		}
6453 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6454 		return;
6455 	}
6456 	/*
6457 	 * No locks required here since bind and mgmt_ep_sa all do their own
6458 	 * locking. If we do something for the FIX: below we may need to
6459 	 * lock in that case.
6460 	 */
6461 	if (assoc_id == 0) {
6462 		/* add the address */
6463 		struct sctp_inpcb *lep;
6464 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6465 
6466 		/* validate the incoming port */
6467 		if ((lsin->sin_port != 0) &&
6468 		    (lsin->sin_port != inp->sctp_lport)) {
6469 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6470 			*error = EINVAL;
6471 			return;
6472 		} else {
6473 			/* user specified 0 port, set it to existing port */
6474 			lsin->sin_port = inp->sctp_lport;
6475 		}
6476 
6477 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6478 		if (lep != NULL) {
6479 			/*
6480 			 * We must decrement the refcount since we have the
6481 			 * ep already and are binding. No remove going on
6482 			 * here.
6483 			 */
6484 			SCTP_INP_DECR_REF(lep);
6485 		}
6486 		if (lep == inp) {
6487 			/* already bound to it.. ok */
6488 			return;
6489 		} else if (lep == NULL) {
6490 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6491 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6492 			    SCTP_ADD_IP_ADDRESS,
6493 			    vrf_id, NULL);
6494 		} else {
6495 			*error = EADDRINUSE;
6496 		}
6497 		if (*error)
6498 			return;
6499 	} else {
6500 		/*
6501 		 * FIX: decide whether we allow assoc based bindx
6502 		 */
6503 	}
6504 }
6505 
6506 /*
6507  * sctp_bindx(DELETE) for one address.
6508  * assumes all arguments are valid/checked by caller.
6509  */
6510 void
6511 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6512     struct sockaddr *sa, sctp_assoc_t assoc_id,
6513     uint32_t vrf_id, int *error)
6514 {
6515 	struct sockaddr *addr_touse;
6516 #if defined(INET) && defined(INET6)
6517 	struct sockaddr_in sin;
6518 #endif
6519 
6520 	/* see if we're bound all already! */
6521 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6522 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6523 		*error = EINVAL;
6524 		return;
6525 	}
6526 	addr_touse = sa;
6527 #ifdef INET6
6528 	if (sa->sa_family == AF_INET6) {
6529 #ifdef INET
6530 		struct sockaddr_in6 *sin6;
6531 #endif
6532 
6533 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6534 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6535 			*error = EINVAL;
6536 			return;
6537 		}
6538 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6539 			/* can only bind v6 on PF_INET6 sockets */
6540 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6541 			*error = EINVAL;
6542 			return;
6543 		}
6544 #ifdef INET
6545 		sin6 = (struct sockaddr_in6 *)addr_touse;
6546 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6547 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6548 			    SCTP_IPV6_V6ONLY(inp)) {
6549 				/* can't bind mapped-v4 on PF_INET sockets */
6550 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6551 				*error = EINVAL;
6552 				return;
6553 			}
6554 			in6_sin6_2_sin(&sin, sin6);
6555 			addr_touse = (struct sockaddr *)&sin;
6556 		}
6557 #endif
6558 	}
6559 #endif
6560 #ifdef INET
6561 	if (sa->sa_family == AF_INET) {
6562 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6563 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6564 			*error = EINVAL;
6565 			return;
6566 		}
6567 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6568 		    SCTP_IPV6_V6ONLY(inp)) {
6569 			/* can't bind v4 on PF_INET sockets */
6570 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6571 			*error = EINVAL;
6572 			return;
6573 		}
6574 	}
6575 #endif
6576 	/*
6577 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6578 	 * below is ever changed we may need to lock before calling
6579 	 * association level binding.
6580 	 */
6581 	if (assoc_id == 0) {
6582 		/* delete the address */
6583 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6584 		    SCTP_DEL_IP_ADDRESS,
6585 		    vrf_id, NULL);
6586 	} else {
6587 		/*
6588 		 * FIX: decide whether we allow assoc based bindx
6589 		 */
6590 	}
6591 }
6592 
6593 /*
6594  * returns the valid local address count for an assoc, taking into account
6595  * all scoping rules
6596  */
6597 int
6598 sctp_local_addr_count(struct sctp_tcb *stcb)
6599 {
6600 	int loopback_scope;
6601 #if defined(INET)
6602 	int ipv4_local_scope, ipv4_addr_legal;
6603 #endif
6604 #if defined (INET6)
6605 	int local_scope, site_scope, ipv6_addr_legal;
6606 #endif
6607 	struct sctp_vrf *vrf;
6608 	struct sctp_ifn *sctp_ifn;
6609 	struct sctp_ifa *sctp_ifa;
6610 	int count = 0;
6611 
6612 	/* Turn on all the appropriate scopes */
6613 	loopback_scope = stcb->asoc.scope.loopback_scope;
6614 #if defined(INET)
6615 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6616 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6617 #endif
6618 #if defined(INET6)
6619 	local_scope = stcb->asoc.scope.local_scope;
6620 	site_scope = stcb->asoc.scope.site_scope;
6621 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6622 #endif
6623 	SCTP_IPI_ADDR_RLOCK();
6624 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6625 	if (vrf == NULL) {
6626 		/* no vrf, no addresses */
6627 		SCTP_IPI_ADDR_RUNLOCK();
6628 		return (0);
6629 	}
6630 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6631 		/*
6632 		 * bound all case: go through all ifns on the vrf
6633 		 */
6634 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6635 			if ((loopback_scope == 0) &&
6636 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6637 				continue;
6638 			}
6639 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6640 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6641 					continue;
6642 				switch (sctp_ifa->address.sa.sa_family) {
6643 #ifdef INET
6644 				case AF_INET:
6645 					if (ipv4_addr_legal) {
6646 						struct sockaddr_in *sin;
6647 
6648 						sin = &sctp_ifa->address.sin;
6649 						if (sin->sin_addr.s_addr == 0) {
6650 							/*
6651 							 * skip unspecified
6652 							 * addrs
6653 							 */
6654 							continue;
6655 						}
6656 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6657 						    &sin->sin_addr) != 0) {
6658 							continue;
6659 						}
6660 						if ((ipv4_local_scope == 0) &&
6661 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6662 							continue;
6663 						}
6664 						/* count this one */
6665 						count++;
6666 					} else {
6667 						continue;
6668 					}
6669 					break;
6670 #endif
6671 #ifdef INET6
6672 				case AF_INET6:
6673 					if (ipv6_addr_legal) {
6674 						struct sockaddr_in6 *sin6;
6675 
6676 						sin6 = &sctp_ifa->address.sin6;
6677 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6678 							continue;
6679 						}
6680 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6681 						    &sin6->sin6_addr) != 0) {
6682 							continue;
6683 						}
6684 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6685 							if (local_scope == 0)
6686 								continue;
6687 							if (sin6->sin6_scope_id == 0) {
6688 								if (sa6_recoverscope(sin6) != 0)
6689 									/*
6690 									 *
6691 									 * bad
6692 									 * link
6693 									 *
6694 									 * local
6695 									 *
6696 									 * address
6697 									 */
6698 									continue;
6699 							}
6700 						}
6701 						if ((site_scope == 0) &&
6702 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6703 							continue;
6704 						}
6705 						/* count this one */
6706 						count++;
6707 					}
6708 					break;
6709 #endif
6710 				default:
6711 					/* TSNH */
6712 					break;
6713 				}
6714 			}
6715 		}
6716 	} else {
6717 		/*
6718 		 * subset bound case
6719 		 */
6720 		struct sctp_laddr *laddr;
6721 
6722 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6723 		    sctp_nxt_addr) {
6724 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6725 				continue;
6726 			}
6727 			/* count this one */
6728 			count++;
6729 		}
6730 	}
6731 	SCTP_IPI_ADDR_RUNLOCK();
6732 	return (count);
6733 }
6734 
6735 #if defined(SCTP_LOCAL_TRACE_BUF)
6736 
6737 void
6738 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6739 {
6740 	uint32_t saveindex, newindex;
6741 
6742 	do {
6743 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6744 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6745 			newindex = 1;
6746 		} else {
6747 			newindex = saveindex + 1;
6748 		}
6749 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6750 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6751 		saveindex = 0;
6752 	}
6753 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6754 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6755 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6756 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6757 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6758 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6759 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6760 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6761 }
6762 
6763 #endif
6764 static void
6765 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6766     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6767 {
6768 	struct ip *iph;
6769 #ifdef INET6
6770 	struct ip6_hdr *ip6;
6771 #endif
6772 	struct mbuf *sp, *last;
6773 	struct udphdr *uhdr;
6774 	uint16_t port;
6775 
6776 	if ((m->m_flags & M_PKTHDR) == 0) {
6777 		/* Can't handle one that is not a pkt hdr */
6778 		goto out;
6779 	}
6780 	/* Pull the src port */
6781 	iph = mtod(m, struct ip *);
6782 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6783 	port = uhdr->uh_sport;
6784 	/*
6785 	 * Split out the mbuf chain. Leave the IP header in m, place the
6786 	 * rest in the sp.
6787 	 */
6788 	sp = m_split(m, off, M_NOWAIT);
6789 	if (sp == NULL) {
6790 		/* Gak, drop packet, we can't do a split */
6791 		goto out;
6792 	}
6793 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6794 		/* Gak, packet can't have an SCTP header in it - too small */
6795 		m_freem(sp);
6796 		goto out;
6797 	}
6798 	/* Now pull up the UDP header and SCTP header together */
6799 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6800 	if (sp == NULL) {
6801 		/* Gak pullup failed */
6802 		goto out;
6803 	}
6804 	/* Trim out the UDP header */
6805 	m_adj(sp, sizeof(struct udphdr));
6806 
6807 	/* Now reconstruct the mbuf chain */
6808 	for (last = m; last->m_next; last = last->m_next);
6809 	last->m_next = sp;
6810 	m->m_pkthdr.len += sp->m_pkthdr.len;
6811 	/*
6812 	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
6813 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
6814 	 * CSUM_SCTP_VALID this would imply that the HW also verified the
6815 	 * SCTP checksum. Therefore, clear the bit.
6816 	 */
6817 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6818 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6819 	    m->m_pkthdr.len,
6820 	    if_name(m->m_pkthdr.rcvif),
6821 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6822 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
6823 	iph = mtod(m, struct ip *);
6824 	switch (iph->ip_v) {
6825 #ifdef INET
6826 	case IPVERSION:
6827 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6828 		sctp_input_with_port(m, off, port);
6829 		break;
6830 #endif
6831 #ifdef INET6
6832 	case IPV6_VERSION >> 4:
6833 		ip6 = mtod(m, struct ip6_hdr *);
6834 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6835 		sctp6_input_with_port(&m, &off, port);
6836 		break;
6837 #endif
6838 	default:
6839 		goto out;
6840 		break;
6841 	}
6842 	return;
6843 out:
6844 	m_freem(m);
6845 }
6846 
6847 #ifdef INET
6848 static void
6849 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
6850 {
6851 	struct ip *outer_ip, *inner_ip;
6852 	struct sctphdr *sh;
6853 	struct icmp *icmp;
6854 	struct udphdr *udp;
6855 	struct sctp_inpcb *inp;
6856 	struct sctp_tcb *stcb;
6857 	struct sctp_nets *net;
6858 	struct sctp_init_chunk *ch;
6859 	struct sockaddr_in src, dst;
6860 	uint8_t type, code;
6861 
6862 	inner_ip = (struct ip *)vip;
6863 	icmp = (struct icmp *)((caddr_t)inner_ip -
6864 	    (sizeof(struct icmp) - sizeof(struct ip)));
6865 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
6866 	if (ntohs(outer_ip->ip_len) <
6867 	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
6868 		return;
6869 	}
6870 	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
6871 	sh = (struct sctphdr *)(udp + 1);
6872 	memset(&src, 0, sizeof(struct sockaddr_in));
6873 	src.sin_family = AF_INET;
6874 	src.sin_len = sizeof(struct sockaddr_in);
6875 	src.sin_port = sh->src_port;
6876 	src.sin_addr = inner_ip->ip_src;
6877 	memset(&dst, 0, sizeof(struct sockaddr_in));
6878 	dst.sin_family = AF_INET;
6879 	dst.sin_len = sizeof(struct sockaddr_in);
6880 	dst.sin_port = sh->dest_port;
6881 	dst.sin_addr = inner_ip->ip_dst;
6882 	/*
6883 	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
6884 	 * holds our local endpoint address. Thus we reverse the dst and the
6885 	 * src in the lookup.
6886 	 */
6887 	inp = NULL;
6888 	net = NULL;
6889 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
6890 	    (struct sockaddr *)&src,
6891 	    &inp, &net, 1,
6892 	    SCTP_DEFAULT_VRFID);
6893 	if ((stcb != NULL) &&
6894 	    (net != NULL) &&
6895 	    (inp != NULL)) {
6896 		/* Check the UDP port numbers */
6897 		if ((udp->uh_dport != net->port) ||
6898 		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
6899 			SCTP_TCB_UNLOCK(stcb);
6900 			return;
6901 		}
6902 		/* Check the verification tag */
6903 		if (ntohl(sh->v_tag) != 0) {
6904 			/*
6905 			 * This must be the verification tag used for
6906 			 * sending out packets. We don't consider packets
6907 			 * reflecting the verification tag.
6908 			 */
6909 			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
6910 				SCTP_TCB_UNLOCK(stcb);
6911 				return;
6912 			}
6913 		} else {
6914 			if (ntohs(outer_ip->ip_len) >=
6915 			    sizeof(struct ip) +
6916 			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
6917 				/*
6918 				 * In this case we can check if we got an
6919 				 * INIT chunk and if the initiate tag
6920 				 * matches.
6921 				 */
6922 				ch = (struct sctp_init_chunk *)(sh + 1);
6923 				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
6924 				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
6925 					SCTP_TCB_UNLOCK(stcb);
6926 					return;
6927 				}
6928 			} else {
6929 				SCTP_TCB_UNLOCK(stcb);
6930 				return;
6931 			}
6932 		}
6933 		type = icmp->icmp_type;
6934 		code = icmp->icmp_code;
6935 		if ((type == ICMP_UNREACH) &&
6936 		    (code == ICMP_UNREACH_PORT)) {
6937 			code = ICMP_UNREACH_PROTOCOL;
6938 		}
6939 		sctp_notify(inp, stcb, net, type, code,
6940 		    ntohs(inner_ip->ip_len),
6941 		    (uint32_t)ntohs(icmp->icmp_nextmtu));
6942 	} else {
6943 		if ((stcb == NULL) && (inp != NULL)) {
6944 			/* reduce ref-count */
6945 			SCTP_INP_WLOCK(inp);
6946 			SCTP_INP_DECR_REF(inp);
6947 			SCTP_INP_WUNLOCK(inp);
6948 		}
6949 		if (stcb) {
6950 			SCTP_TCB_UNLOCK(stcb);
6951 		}
6952 	}
6953 	return;
6954 }
6955 #endif
6956 
6957 #ifdef INET6
6958 static void
6959 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
6960 {
6961 	struct ip6ctlparam *ip6cp;
6962 	struct sctp_inpcb *inp;
6963 	struct sctp_tcb *stcb;
6964 	struct sctp_nets *net;
6965 	struct sctphdr sh;
6966 	struct udphdr udp;
6967 	struct sockaddr_in6 src, dst;
6968 	uint8_t type, code;
6969 
6970 	ip6cp = (struct ip6ctlparam *)d;
6971 	/*
6972 	 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
6973 	 */
6974 	if (ip6cp->ip6c_m == NULL) {
6975 		return;
6976 	}
6977 	/*
6978 	 * Check if we can safely examine the ports and the verification tag
6979 	 * of the SCTP common header.
6980 	 */
6981 	if (ip6cp->ip6c_m->m_pkthdr.len <
6982 	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
6983 		return;
6984 	}
6985 	/* Copy out the UDP header. */
6986 	memset(&udp, 0, sizeof(struct udphdr));
6987 	m_copydata(ip6cp->ip6c_m,
6988 	    ip6cp->ip6c_off,
6989 	    sizeof(struct udphdr),
6990 	    (caddr_t)&udp);
6991 	/* Copy out the port numbers and the verification tag. */
6992 	memset(&sh, 0, sizeof(struct sctphdr));
6993 	m_copydata(ip6cp->ip6c_m,
6994 	    ip6cp->ip6c_off + sizeof(struct udphdr),
6995 	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
6996 	    (caddr_t)&sh);
6997 	memset(&src, 0, sizeof(struct sockaddr_in6));
6998 	src.sin6_family = AF_INET6;
6999 	src.sin6_len = sizeof(struct sockaddr_in6);
7000 	src.sin6_port = sh.src_port;
7001 	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
7002 	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7003 		return;
7004 	}
7005 	memset(&dst, 0, sizeof(struct sockaddr_in6));
7006 	dst.sin6_family = AF_INET6;
7007 	dst.sin6_len = sizeof(struct sockaddr_in6);
7008 	dst.sin6_port = sh.dest_port;
7009 	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7010 	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7011 		return;
7012 	}
7013 	inp = NULL;
7014 	net = NULL;
7015 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7016 	    (struct sockaddr *)&src,
7017 	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7018 	if ((stcb != NULL) &&
7019 	    (net != NULL) &&
7020 	    (inp != NULL)) {
7021 		/* Check the UDP port numbers */
7022 		if ((udp.uh_dport != net->port) ||
7023 		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7024 			SCTP_TCB_UNLOCK(stcb);
7025 			return;
7026 		}
7027 		/* Check the verification tag */
7028 		if (ntohl(sh.v_tag) != 0) {
7029 			/*
7030 			 * This must be the verification tag used for
7031 			 * sending out packets. We don't consider packets
7032 			 * reflecting the verification tag.
7033 			 */
7034 			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7035 				SCTP_TCB_UNLOCK(stcb);
7036 				return;
7037 			}
7038 		} else {
7039 			if (ip6cp->ip6c_m->m_pkthdr.len >=
7040 			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7041 			    sizeof(struct sctphdr) +
7042 			    sizeof(struct sctp_chunkhdr) +
7043 			    offsetof(struct sctp_init, a_rwnd)) {
7044 				/*
7045 				 * In this case we can check if we got an
7046 				 * INIT chunk and if the initiate tag
7047 				 * matches.
7048 				 */
7049 				uint32_t initiate_tag;
7050 				uint8_t chunk_type;
7051 
7052 				m_copydata(ip6cp->ip6c_m,
7053 				    ip6cp->ip6c_off +
7054 				    sizeof(struct udphdr) +
7055 				    sizeof(struct sctphdr),
7056 				    sizeof(uint8_t),
7057 				    (caddr_t)&chunk_type);
7058 				m_copydata(ip6cp->ip6c_m,
7059 				    ip6cp->ip6c_off +
7060 				    sizeof(struct udphdr) +
7061 				    sizeof(struct sctphdr) +
7062 				    sizeof(struct sctp_chunkhdr),
7063 				    sizeof(uint32_t),
7064 				    (caddr_t)&initiate_tag);
7065 				if ((chunk_type != SCTP_INITIATION) ||
7066 				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7067 					SCTP_TCB_UNLOCK(stcb);
7068 					return;
7069 				}
7070 			} else {
7071 				SCTP_TCB_UNLOCK(stcb);
7072 				return;
7073 			}
7074 		}
7075 		type = ip6cp->ip6c_icmp6->icmp6_type;
7076 		code = ip6cp->ip6c_icmp6->icmp6_code;
7077 		if ((type == ICMP6_DST_UNREACH) &&
7078 		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7079 			type = ICMP6_PARAM_PROB;
7080 			code = ICMP6_PARAMPROB_NEXTHEADER;
7081 		}
7082 		sctp6_notify(inp, stcb, net, type, code,
7083 		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7084 	} else {
7085 		if ((stcb == NULL) && (inp != NULL)) {
7086 			/* reduce inp's ref-count */
7087 			SCTP_INP_WLOCK(inp);
7088 			SCTP_INP_DECR_REF(inp);
7089 			SCTP_INP_WUNLOCK(inp);
7090 		}
7091 		if (stcb) {
7092 			SCTP_TCB_UNLOCK(stcb);
7093 		}
7094 	}
7095 }
7096 #endif
7097 
7098 void
7099 sctp_over_udp_stop(void)
7100 {
7101 	/*
7102 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7103 	 * for writting!
7104 	 */
7105 #ifdef INET
7106 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7107 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7108 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7109 	}
7110 #endif
7111 #ifdef INET6
7112 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7113 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7114 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7115 	}
7116 #endif
7117 }
7118 
7119 int
7120 sctp_over_udp_start(void)
7121 {
7122 	uint16_t port;
7123 	int ret;
7124 #ifdef INET
7125 	struct sockaddr_in sin;
7126 #endif
7127 #ifdef INET6
7128 	struct sockaddr_in6 sin6;
7129 #endif
7130 	/*
7131 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7132 	 * for writting!
7133 	 */
7134 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7135 	if (ntohs(port) == 0) {
7136 		/* Must have a port set */
7137 		return (EINVAL);
7138 	}
7139 #ifdef INET
7140 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7141 		/* Already running -- must stop first */
7142 		return (EALREADY);
7143 	}
7144 #endif
7145 #ifdef INET6
7146 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7147 		/* Already running -- must stop first */
7148 		return (EALREADY);
7149 	}
7150 #endif
7151 #ifdef INET
7152 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7153 	    SOCK_DGRAM, IPPROTO_UDP,
7154 	    curthread->td_ucred, curthread))) {
7155 		sctp_over_udp_stop();
7156 		return (ret);
7157 	}
7158 	/* Call the special UDP hook. */
7159 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7160 	    sctp_recv_udp_tunneled_packet,
7161 	    sctp_recv_icmp_tunneled_packet,
7162 	    NULL))) {
7163 		sctp_over_udp_stop();
7164 		return (ret);
7165 	}
7166 	/* Ok, we have a socket, bind it to the port. */
7167 	memset(&sin, 0, sizeof(struct sockaddr_in));
7168 	sin.sin_len = sizeof(struct sockaddr_in);
7169 	sin.sin_family = AF_INET;
7170 	sin.sin_port = htons(port);
7171 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7172 	    (struct sockaddr *)&sin, curthread))) {
7173 		sctp_over_udp_stop();
7174 		return (ret);
7175 	}
7176 #endif
7177 #ifdef INET6
7178 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7179 	    SOCK_DGRAM, IPPROTO_UDP,
7180 	    curthread->td_ucred, curthread))) {
7181 		sctp_over_udp_stop();
7182 		return (ret);
7183 	}
7184 	/* Call the special UDP hook. */
7185 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7186 	    sctp_recv_udp_tunneled_packet,
7187 	    sctp_recv_icmp6_tunneled_packet,
7188 	    NULL))) {
7189 		sctp_over_udp_stop();
7190 		return (ret);
7191 	}
7192 	/* Ok, we have a socket, bind it to the port. */
7193 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7194 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7195 	sin6.sin6_family = AF_INET6;
7196 	sin6.sin6_port = htons(port);
7197 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7198 	    (struct sockaddr *)&sin6, curthread))) {
7199 		sctp_over_udp_stop();
7200 		return (ret);
7201 	}
7202 #endif
7203 	return (0);
7204 }
7205 
7206 /*
7207  * sctp_min_mtu ()returns the minimum of all non-zero arguments.
7208  * If all arguments are zero, zero is returned.
7209  */
7210 uint32_t
7211 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7212 {
7213 	if (mtu1 > 0) {
7214 		if (mtu2 > 0) {
7215 			if (mtu3 > 0) {
7216 				return (min(mtu1, min(mtu2, mtu3)));
7217 			} else {
7218 				return (min(mtu1, mtu2));
7219 			}
7220 		} else {
7221 			if (mtu3 > 0) {
7222 				return (min(mtu1, mtu3));
7223 			} else {
7224 				return (mtu1);
7225 			}
7226 		}
7227 	} else {
7228 		if (mtu2 > 0) {
7229 			if (mtu3 > 0) {
7230 				return (min(mtu2, mtu3));
7231 			} else {
7232 				return (mtu2);
7233 			}
7234 		} else {
7235 			return (mtu3);
7236 		}
7237 	}
7238 }
7239 
7240 void
7241 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7242 {
7243 	struct in_conninfo inc;
7244 
7245 	memset(&inc, 0, sizeof(struct in_conninfo));
7246 	inc.inc_fibnum = fibnum;
7247 	switch (addr->sa.sa_family) {
7248 #ifdef INET
7249 	case AF_INET:
7250 		inc.inc_faddr = addr->sin.sin_addr;
7251 		break;
7252 #endif
7253 #ifdef INET6
7254 	case AF_INET6:
7255 		inc.inc_flags |= INC_ISIPV6;
7256 		inc.inc6_faddr = addr->sin6.sin6_addr;
7257 		break;
7258 #endif
7259 	default:
7260 		return;
7261 	}
7262 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7263 }
7264 
7265 uint32_t
7266 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7267 {
7268 	struct in_conninfo inc;
7269 
7270 	memset(&inc, 0, sizeof(struct in_conninfo));
7271 	inc.inc_fibnum = fibnum;
7272 	switch (addr->sa.sa_family) {
7273 #ifdef INET
7274 	case AF_INET:
7275 		inc.inc_faddr = addr->sin.sin_addr;
7276 		break;
7277 #endif
7278 #ifdef INET6
7279 	case AF_INET6:
7280 		inc.inc_flags |= INC_ISIPV6;
7281 		inc.inc6_faddr = addr->sin6.sin6_addr;
7282 		break;
7283 #endif
7284 	default:
7285 		return (0);
7286 	}
7287 	return ((uint32_t)tcp_hc_getmtu(&inc));
7288 }
7289