xref: /freebsd/sys/netinet/sctputil.c (revision 282e23f07bf49b4e37aabdcc1c513a788db36d10)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>	/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55 #ifdef INET6
56 #include <netinet/icmp6.h>
57 #endif
58 
59 
60 #ifndef KTR_SCTP
61 #define KTR_SCTP KTR_SUBSYS
62 #endif
63 
64 extern const struct sctp_cc_functions sctp_cc_functions[];
65 extern const struct sctp_ss_functions sctp_ss_functions[];
66 
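/*
 * The sctp_*log*() helpers below pack event-specific data into a
 * struct sctp_cwnd_log and emit it through the KTR_SCTP trace
 * facility via SCTP_CTR6().
 */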
67 void
68 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
69 {
70 	struct sctp_cwnd_log sctp_clog;
71 
72 	sctp_clog.x.sb.stcb = stcb;
73 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
74 	if (stcb)
75 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
76 	else
77 		sctp_clog.x.sb.stcb_sbcc = 0;
78 	sctp_clog.x.sb.incr = incr;
79 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
80 	    SCTP_LOG_EVENT_SB,
81 	    from,
82 	    sctp_clog.x.misc.log1,
83 	    sctp_clog.x.misc.log2,
84 	    sctp_clog.x.misc.log3,
85 	    sctp_clog.x.misc.log4);
86 }
87 
88 void
89 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
90 {
91 	struct sctp_cwnd_log sctp_clog;
92 
93 	sctp_clog.x.close.inp = (void *)inp;
94 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
95 	if (stcb) {
96 		sctp_clog.x.close.stcb = (void *)stcb;
97 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
98 	} else {
99 		sctp_clog.x.close.stcb = 0;
100 		sctp_clog.x.close.state = 0;
101 	}
102 	sctp_clog.x.close.loc = loc;
103 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
104 	    SCTP_LOG_EVENT_CLOSE,
105 	    0,
106 	    sctp_clog.x.misc.log1,
107 	    sctp_clog.x.misc.log2,
108 	    sctp_clog.x.misc.log3,
109 	    sctp_clog.x.misc.log4);
110 }
111 
112 void
113 rto_logging(struct sctp_nets *net, int from)
114 {
115 	struct sctp_cwnd_log sctp_clog;
116 
117 	memset(&sctp_clog, 0, sizeof(sctp_clog));
118 	sctp_clog.x.rto.net = (void *)net;
119 	sctp_clog.x.rto.rtt = net->rtt / 1000;
120 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
121 	    SCTP_LOG_EVENT_RTT,
122 	    from,
123 	    sctp_clog.x.misc.log1,
124 	    sctp_clog.x.misc.log2,
125 	    sctp_clog.x.misc.log3,
126 	    sctp_clog.x.misc.log4);
127 }
128 
129 void
130 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
131 {
132 	struct sctp_cwnd_log sctp_clog;
133 
134 	sctp_clog.x.strlog.stcb = stcb;
135 	sctp_clog.x.strlog.n_tsn = tsn;
136 	sctp_clog.x.strlog.n_sseq = sseq;
137 	sctp_clog.x.strlog.e_tsn = 0;
138 	sctp_clog.x.strlog.e_sseq = 0;
139 	sctp_clog.x.strlog.strm = stream;
140 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
141 	    SCTP_LOG_EVENT_STRM,
142 	    from,
143 	    sctp_clog.x.misc.log1,
144 	    sctp_clog.x.misc.log2,
145 	    sctp_clog.x.misc.log3,
146 	    sctp_clog.x.misc.log4);
147 }
148 
149 void
150 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
151 {
152 	struct sctp_cwnd_log sctp_clog;
153 
154 	sctp_clog.x.nagle.stcb = (void *)stcb;
155 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
156 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
157 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
158 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
159 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
160 	    SCTP_LOG_EVENT_NAGLE,
161 	    action,
162 	    sctp_clog.x.misc.log1,
163 	    sctp_clog.x.misc.log2,
164 	    sctp_clog.x.misc.log3,
165 	    sctp_clog.x.misc.log4);
166 }
167 
168 void
169 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
170 {
171 	struct sctp_cwnd_log sctp_clog;
172 
173 	sctp_clog.x.sack.cumack = cumack;
174 	sctp_clog.x.sack.oldcumack = old_cumack;
175 	sctp_clog.x.sack.tsn = tsn;
176 	sctp_clog.x.sack.numGaps = gaps;
177 	sctp_clog.x.sack.numDups = dups;
178 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
179 	    SCTP_LOG_EVENT_SACK,
180 	    from,
181 	    sctp_clog.x.misc.log1,
182 	    sctp_clog.x.misc.log2,
183 	    sctp_clog.x.misc.log3,
184 	    sctp_clog.x.misc.log4);
185 }
186 
187 void
188 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
189 {
190 	struct sctp_cwnd_log sctp_clog;
191 
192 	memset(&sctp_clog, 0, sizeof(sctp_clog));
193 	sctp_clog.x.map.base = map;
194 	sctp_clog.x.map.cum = cum;
195 	sctp_clog.x.map.high = high;
196 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
197 	    SCTP_LOG_EVENT_MAP,
198 	    from,
199 	    sctp_clog.x.misc.log1,
200 	    sctp_clog.x.misc.log2,
201 	    sctp_clog.x.misc.log3,
202 	    sctp_clog.x.misc.log4);
203 }
204 
205 void
206 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
207 {
208 	struct sctp_cwnd_log sctp_clog;
209 
210 	memset(&sctp_clog, 0, sizeof(sctp_clog));
211 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
212 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
213 	sctp_clog.x.fr.tsn = tsn;
214 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
215 	    SCTP_LOG_EVENT_FR,
216 	    from,
217 	    sctp_clog.x.misc.log1,
218 	    sctp_clog.x.misc.log2,
219 	    sctp_clog.x.misc.log3,
220 	    sctp_clog.x.misc.log4);
221 }
222 
223 #ifdef SCTP_MBUF_LOGGING
224 void
225 sctp_log_mb(struct mbuf *m, int from)
226 {
227 	struct sctp_cwnd_log sctp_clog;
228 
229 	sctp_clog.x.mb.mp = m;
230 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
231 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
232 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
233 	if (SCTP_BUF_IS_EXTENDED(m)) {
234 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
235 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
236 	} else {
237 		sctp_clog.x.mb.ext = 0;
238 		sctp_clog.x.mb.refcnt = 0;
239 	}
240 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
241 	    SCTP_LOG_EVENT_MBUF,
242 	    from,
243 	    sctp_clog.x.misc.log1,
244 	    sctp_clog.x.misc.log2,
245 	    sctp_clog.x.misc.log3,
246 	    sctp_clog.x.misc.log4);
247 }
248 
249 void
250 sctp_log_mbc(struct mbuf *m, int from)
251 {
252 	struct mbuf *mat;
253 
254 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
255 		sctp_log_mb(mat, from);
256 	}
257 }
258 
259 #endif
260 
261 void
262 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
263 {
264 	struct sctp_cwnd_log sctp_clog;
265 
266 	if (control == NULL) {
267 		SCTP_PRINTF("Gak log of NULL?\n");
268 		return;
269 	}
270 	sctp_clog.x.strlog.stcb = control->stcb;
271 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
272 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
273 	sctp_clog.x.strlog.strm = control->sinfo_stream;
274 	if (poschk != NULL) {
275 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
276 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
277 	} else {
278 		sctp_clog.x.strlog.e_tsn = 0;
279 		sctp_clog.x.strlog.e_sseq = 0;
280 	}
281 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
282 	    SCTP_LOG_EVENT_STRM,
283 	    from,
284 	    sctp_clog.x.misc.log1,
285 	    sctp_clog.x.misc.log2,
286 	    sctp_clog.x.misc.log3,
287 	    sctp_clog.x.misc.log4);
288 }
289 
290 void
291 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
292 {
293 	struct sctp_cwnd_log sctp_clog;
294 
295 	sctp_clog.x.cwnd.net = net;
296 	if (stcb->asoc.send_queue_cnt > 255)
297 		sctp_clog.x.cwnd.cnt_in_send = 255;
298 	else
299 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
300 	if (stcb->asoc.stream_queue_cnt > 255)
301 		sctp_clog.x.cwnd.cnt_in_str = 255;
302 	else
303 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
304 
305 	if (net) {
306 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
307 		sctp_clog.x.cwnd.inflight = net->flight_size;
308 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
309 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
310 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
311 	}
312 	if (SCTP_CWNDLOG_PRESEND == from) {
313 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
314 	}
315 	sctp_clog.x.cwnd.cwnd_augment = augment;
316 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
317 	    SCTP_LOG_EVENT_CWND,
318 	    from,
319 	    sctp_clog.x.misc.log1,
320 	    sctp_clog.x.misc.log2,
321 	    sctp_clog.x.misc.log3,
322 	    sctp_clog.x.misc.log4);
323 }
324 
325 void
326 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
327 {
328 	struct sctp_cwnd_log sctp_clog;
329 
330 	memset(&sctp_clog, 0, sizeof(sctp_clog));
331 	if (inp) {
332 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
333 
334 	} else {
335 		sctp_clog.x.lock.sock = (void *)NULL;
336 	}
337 	sctp_clog.x.lock.inp = (void *)inp;
338 	if (stcb) {
339 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
340 	} else {
341 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
342 	}
343 	if (inp) {
344 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
345 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
346 	} else {
347 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
348 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
349 	}
350 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
351 	if (inp && (inp->sctp_socket)) {
352 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
353 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
354 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
355 	} else {
356 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
357 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
358 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
359 	}
360 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
361 	    SCTP_LOG_LOCK_EVENT,
362 	    from,
363 	    sctp_clog.x.misc.log1,
364 	    sctp_clog.x.misc.log2,
365 	    sctp_clog.x.misc.log3,
366 	    sctp_clog.x.misc.log4);
367 }
368 
369 void
370 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
371 {
372 	struct sctp_cwnd_log sctp_clog;
373 
374 	memset(&sctp_clog, 0, sizeof(sctp_clog));
375 	sctp_clog.x.cwnd.net = net;
376 	sctp_clog.x.cwnd.cwnd_new_value = error;
377 	sctp_clog.x.cwnd.inflight = net->flight_size;
378 	sctp_clog.x.cwnd.cwnd_augment = burst;
379 	if (stcb->asoc.send_queue_cnt > 255)
380 		sctp_clog.x.cwnd.cnt_in_send = 255;
381 	else
382 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
383 	if (stcb->asoc.stream_queue_cnt > 255)
384 		sctp_clog.x.cwnd.cnt_in_str = 255;
385 	else
386 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
387 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
388 	    SCTP_LOG_EVENT_MAXBURST,
389 	    from,
390 	    sctp_clog.x.misc.log1,
391 	    sctp_clog.x.misc.log2,
392 	    sctp_clog.x.misc.log3,
393 	    sctp_clog.x.misc.log4);
394 }
395 
396 void
397 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
398 {
399 	struct sctp_cwnd_log sctp_clog;
400 
401 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
402 	sctp_clog.x.rwnd.send_size = snd_size;
403 	sctp_clog.x.rwnd.overhead = overhead;
404 	sctp_clog.x.rwnd.new_rwnd = 0;
405 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
406 	    SCTP_LOG_EVENT_RWND,
407 	    from,
408 	    sctp_clog.x.misc.log1,
409 	    sctp_clog.x.misc.log2,
410 	    sctp_clog.x.misc.log3,
411 	    sctp_clog.x.misc.log4);
412 }
413 
414 void
415 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
416 {
417 	struct sctp_cwnd_log sctp_clog;
418 
419 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
420 	sctp_clog.x.rwnd.send_size = flight_size;
421 	sctp_clog.x.rwnd.overhead = overhead;
422 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
423 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
424 	    SCTP_LOG_EVENT_RWND,
425 	    from,
426 	    sctp_clog.x.misc.log1,
427 	    sctp_clog.x.misc.log2,
428 	    sctp_clog.x.misc.log3,
429 	    sctp_clog.x.misc.log4);
430 }
431 
432 #ifdef SCTP_MBCNT_LOGGING
433 static void
434 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
435 {
436 	struct sctp_cwnd_log sctp_clog;
437 
438 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
439 	sctp_clog.x.mbcnt.size_change = book;
440 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
441 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
442 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
443 	    SCTP_LOG_EVENT_MBCNT,
444 	    from,
445 	    sctp_clog.x.misc.log1,
446 	    sctp_clog.x.misc.log2,
447 	    sctp_clog.x.misc.log3,
448 	    sctp_clog.x.misc.log4);
449 }
450 
451 #endif
452 
453 void
454 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
455 {
456 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
457 	    SCTP_LOG_MISC_EVENT,
458 	    from,
459 	    a, b, c, d);
460 }
461 
462 void
463 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
464 {
465 	struct sctp_cwnd_log sctp_clog;
466 
467 	sctp_clog.x.wake.stcb = (void *)stcb;
468 	sctp_clog.x.wake.wake_cnt = wake_cnt;
469 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
470 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
471 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
472 
473 	if (stcb->asoc.stream_queue_cnt < 0xff)
474 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
475 	else
476 		sctp_clog.x.wake.stream_qcnt = 0xff;
477 
478 	if (stcb->asoc.chunks_on_out_queue < 0xff)
479 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
480 	else
481 		sctp_clog.x.wake.chunks_on_oque = 0xff;
482 
483 	sctp_clog.x.wake.sctpflags = 0;
484 	/* set in the deferred mode stuff */
485 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
486 		sctp_clog.x.wake.sctpflags |= 1;
487 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
488 		sctp_clog.x.wake.sctpflags |= 2;
489 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
490 		sctp_clog.x.wake.sctpflags |= 4;
491 	/* what about the sb */
492 	if (stcb->sctp_socket) {
493 		struct socket *so = stcb->sctp_socket;
494 
495 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
496 	} else {
497 		sctp_clog.x.wake.sbflags = 0xff;
498 	}
499 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
500 	    SCTP_LOG_EVENT_WAKE,
501 	    from,
502 	    sctp_clog.x.misc.log1,
503 	    sctp_clog.x.misc.log2,
504 	    sctp_clog.x.misc.log3,
505 	    sctp_clog.x.misc.log4);
506 }
507 
508 void
509 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
510 {
511 	struct sctp_cwnd_log sctp_clog;
512 
513 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
514 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
515 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
516 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
517 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
518 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
519 	sctp_clog.x.blk.sndlen = (uint32_t) sendlen;
520 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
521 	    SCTP_LOG_EVENT_BLOCK,
522 	    from,
523 	    sctp_clog.x.misc.log1,
524 	    sctp_clog.x.misc.log2,
525 	    sctp_clog.x.misc.log3,
526 	    sctp_clog.x.misc.log4);
527 }
528 
529 int
530 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
531 {
532 	/* May need to fix this if ktrdump does not work */
533 	return (0);
534 }
535 
536 #ifdef SCTP_AUDITING_ENABLED
537 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
538 static int sctp_audit_indx = 0;
539 
540 static
541 void
542 sctp_print_audit_report(void)
543 {
544 	int i;
545 	int cnt;
546 
547 	cnt = 0;
548 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
549 		if ((sctp_audit_data[i][0] == 0xe0) &&
550 		    (sctp_audit_data[i][1] == 0x01)) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if (sctp_audit_data[i][0] == 0xf0) {
554 			cnt = 0;
555 			SCTP_PRINTF("\n");
556 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
557 		    (sctp_audit_data[i][1] == 0x01)) {
558 			SCTP_PRINTF("\n");
559 			cnt = 0;
560 		}
561 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
562 		    (uint32_t) sctp_audit_data[i][1]);
563 		cnt++;
564 		if ((cnt % 14) == 0)
565 			SCTP_PRINTF("\n");
566 	}
567 	for (i = 0; i < sctp_audit_indx; i++) {
568 		if ((sctp_audit_data[i][0] == 0xe0) &&
569 		    (sctp_audit_data[i][1] == 0x01)) {
570 			cnt = 0;
571 			SCTP_PRINTF("\n");
572 		} else if (sctp_audit_data[i][0] == 0xf0) {
573 			cnt = 0;
574 			SCTP_PRINTF("\n");
575 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
576 		    (sctp_audit_data[i][1] == 0x01)) {
577 			SCTP_PRINTF("\n");
578 			cnt = 0;
579 		}
580 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
581 		    (uint32_t) sctp_audit_data[i][1]);
582 		cnt++;
583 		if ((cnt % 14) == 0)
584 			SCTP_PRINTF("\n");
585 	}
586 	SCTP_PRINTF("\n");
587 }
588 
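/*
 * Consistency check (only with SCTP_AUDITING_ENABLED): record an audit
 * entry and verify that the retransmit count, total flight size, and
 * per-net flight sizes match what is actually queued, correcting any
 * mismatch and printing an audit report when one is found.
 */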
589 void
590 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
591     struct sctp_nets *net)
592 {
593 	int resend_cnt, tot_out, rep, tot_book_cnt;
594 	struct sctp_nets *lnet;
595 	struct sctp_tmit_chunk *chk;
596 
597 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
598 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
599 	sctp_audit_indx++;
600 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
601 		sctp_audit_indx = 0;
602 	}
603 	if (inp == NULL) {
604 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
605 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
606 		sctp_audit_indx++;
607 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
608 			sctp_audit_indx = 0;
609 		}
610 		return;
611 	}
612 	if (stcb == NULL) {
613 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
614 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
615 		sctp_audit_indx++;
616 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
617 			sctp_audit_indx = 0;
618 		}
619 		return;
620 	}
621 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
622 	sctp_audit_data[sctp_audit_indx][1] =
623 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
624 	sctp_audit_indx++;
625 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
626 		sctp_audit_indx = 0;
627 	}
628 	rep = 0;
629 	tot_book_cnt = 0;
630 	resend_cnt = tot_out = 0;
631 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
632 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
633 			resend_cnt++;
634 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
635 			tot_out += chk->book_size;
636 			tot_book_cnt++;
637 		}
638 	}
639 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
640 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
641 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
642 		sctp_audit_indx++;
643 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
644 			sctp_audit_indx = 0;
645 		}
646 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
647 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
648 		rep = 1;
649 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
650 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
651 		sctp_audit_data[sctp_audit_indx][1] =
652 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
653 		sctp_audit_indx++;
654 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
655 			sctp_audit_indx = 0;
656 		}
657 	}
658 	if (tot_out != stcb->asoc.total_flight) {
659 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
660 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
661 		sctp_audit_indx++;
662 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
663 			sctp_audit_indx = 0;
664 		}
665 		rep = 1;
666 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
667 		    (int)stcb->asoc.total_flight);
668 		stcb->asoc.total_flight = tot_out;
669 	}
670 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
671 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
672 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
673 		sctp_audit_indx++;
674 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
675 			sctp_audit_indx = 0;
676 		}
677 		rep = 1;
678 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
679 
680 		stcb->asoc.total_flight_count = tot_book_cnt;
681 	}
682 	tot_out = 0;
683 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
684 		tot_out += lnet->flight_size;
685 	}
686 	if (tot_out != stcb->asoc.total_flight) {
687 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
688 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
689 		sctp_audit_indx++;
690 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
691 			sctp_audit_indx = 0;
692 		}
693 		rep = 1;
694 		SCTP_PRINTF("real flight:%d net total was %d\n",
695 		    stcb->asoc.total_flight, tot_out);
696 		/* now corrective action */
697 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
698 
699 			tot_out = 0;
700 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
701 				if ((chk->whoTo == lnet) &&
702 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
703 					tot_out += chk->book_size;
704 				}
705 			}
706 			if (lnet->flight_size != tot_out) {
707 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
708 				    (void *)lnet, lnet->flight_size,
709 				    tot_out);
710 				lnet->flight_size = tot_out;
711 			}
712 		}
713 	}
714 	if (rep) {
715 		sctp_print_audit_report();
716 	}
717 }
718 
719 void
720 sctp_audit_log(uint8_t ev, uint8_t fd)
721 {
722 
723 	sctp_audit_data[sctp_audit_indx][0] = ev;
724 	sctp_audit_data[sctp_audit_indx][1] = fd;
725 	sctp_audit_indx++;
726 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
727 		sctp_audit_indx = 0;
728 	}
729 }
730 
731 #endif
732 
733 /*
734  * sctp_stop_timers_for_shutdown() should be called
735  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
736  * state to make sure that all timers are stopped.
737  */
738 void
739 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
740 {
741 	struct sctp_association *asoc;
742 	struct sctp_nets *net;
743 
744 	asoc = &stcb->asoc;
745 
746 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
747 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
748 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
749 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
750 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
751 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
752 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
753 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
754 	}
755 }
756 
757 /*
758  * A list of sizes based on typical MTUs, used only if the next hop's
759  * MTU is not returned.
760  */
761 static uint32_t sctp_mtu_sizes[] = {
762 	68,
763 	296,
764 	508,
765 	512,
766 	544,
767 	576,
768 	1006,
769 	1492,
770 	1500,
771 	1536,
772 	2002,
773 	2048,
774 	4352,
775 	4464,
776 	8166,
777 	17914,
778 	32000,
779 	65535
780 };
781 
782 /*
783  * Return the largest MTU smaller than val. If there is no
784  * entry, just return val.
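 * For example, sctp_get_prev_mtu(1500) returns 1492.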
785  */
786 uint32_t
787 sctp_get_prev_mtu(uint32_t val)
788 {
789 	uint32_t i;
790 
791 	if (val <= sctp_mtu_sizes[0]) {
792 		return (val);
793 	}
794 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
795 		if (val <= sctp_mtu_sizes[i]) {
796 			break;
797 		}
798 	}
799 	return (sctp_mtu_sizes[i - 1]);
800 }
801 
802 /*
803  * Return the smallest MTU larger than val. If there is no
804  * entry, just return val.
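 * For example, sctp_get_next_mtu(1500) returns 1536.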
805  */
806 uint32_t
807 sctp_get_next_mtu(uint32_t val)
808 {
809 	/* select another MTU that is just bigger than this one */
810 	uint32_t i;
811 
812 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
813 		if (val < sctp_mtu_sizes[i]) {
814 			return (sctp_mtu_sizes[i]);
815 		}
816 	}
817 	return (val);
818 }
819 
820 void
821 sctp_fill_random_store(struct sctp_pcb *m)
822 {
823 	/*
824 	 * Here we use MD5/SHA-1 to hash our good random numbers together with
825 	 * our counter. The result becomes our new pool of good random numbers,
826 	 * which we then set up to hand out. Note that we do no locking to
827 	 * protect this; that is ok, since if competing callers get here we
828 	 * just end up with more gobbledygook in the random store, which is
829 	 * what we want. There is a danger that two callers will use the same
830 	 * random numbers, but that is ok too since that is random as well :->
831 	 */
832 	m->store_at = 0;
833 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
834 	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
835 	    sizeof(m->random_counter), (uint8_t *) m->random_store);
836 	m->random_counter++;
837 }
838 
839 uint32_t
840 sctp_select_initial_TSN(struct sctp_pcb *inp)
841 {
842 	/*
843 	 * A true implementation should use a random selection process to get
844 	 * the initial TSN, using RFC 1750 as a good
845 	 * guideline.
846 	 */
847 	uint32_t x, *xp;
848 	uint8_t *p;
849 	int store_at, new_store;
850 
851 	if (inp->initial_sequence_debug != 0) {
852 		uint32_t ret;
853 
854 		ret = inp->initial_sequence_debug;
855 		inp->initial_sequence_debug++;
856 		return (ret);
857 	}
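	/*
	 * Claim a 4-byte slot in the random store. atomic_cmpset_int detects
	 * a racing caller, in which case we simply retry; when the store
	 * wraps around it is refilled with fresh random bytes.
	 */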
858 retry:
859 	store_at = inp->store_at;
860 	new_store = store_at + sizeof(uint32_t);
861 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
862 		new_store = 0;
863 	}
864 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
865 		goto retry;
866 	}
867 	if (new_store == 0) {
868 		/* Refill the random store */
869 		sctp_fill_random_store(inp);
870 	}
871 	p = &inp->random_store[store_at];
872 	xp = (uint32_t *) p;
873 	x = *xp;
874 	return (x);
875 }
876 
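/*
 * Select a random, non-zero verification tag. If 'check' is set, keep
 * drawing until sctp_is_vtag_good() accepts the tag for this
 * lport/rport pair.
 */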
877 uint32_t
878 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
879 {
880 	uint32_t x;
881 	struct timeval now;
882 
883 	if (check) {
884 		(void)SCTP_GETTIME_TIMEVAL(&now);
885 	}
886 	for (;;) {
887 		x = sctp_select_initial_TSN(&inp->sctp_ep);
888 		if (x == 0) {
889 			/* we never use 0 */
890 			continue;
891 		}
892 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
893 			break;
894 		}
895 	}
896 	return (x);
897 }
898 
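/*
 * Translate an internal association state (SCTP_STATE_*) into the
 * corresponding user-visible state constant (SCTP_CLOSED,
 * SCTP_ESTABLISHED, etc.).
 */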
899 int32_t
900 sctp_map_assoc_state(int kernel_state)
901 {
902 	int32_t user_state;
903 
904 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
905 		user_state = SCTP_CLOSED;
906 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
907 		user_state = SCTP_SHUTDOWN_PENDING;
908 	} else {
909 		switch (kernel_state & SCTP_STATE_MASK) {
910 		case SCTP_STATE_EMPTY:
911 			user_state = SCTP_CLOSED;
912 			break;
913 		case SCTP_STATE_INUSE:
914 			user_state = SCTP_CLOSED;
915 			break;
916 		case SCTP_STATE_COOKIE_WAIT:
917 			user_state = SCTP_COOKIE_WAIT;
918 			break;
919 		case SCTP_STATE_COOKIE_ECHOED:
920 			user_state = SCTP_COOKIE_ECHOED;
921 			break;
922 		case SCTP_STATE_OPEN:
923 			user_state = SCTP_ESTABLISHED;
924 			break;
925 		case SCTP_STATE_SHUTDOWN_SENT:
926 			user_state = SCTP_SHUTDOWN_SENT;
927 			break;
928 		case SCTP_STATE_SHUTDOWN_RECEIVED:
929 			user_state = SCTP_SHUTDOWN_RECEIVED;
930 			break;
931 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
932 			user_state = SCTP_SHUTDOWN_ACK_SENT;
933 			break;
934 		default:
935 			user_state = SCTP_CLOSED;
936 			break;
937 		}
938 	}
939 	return (user_state);
940 }
941 
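/*
 * Initialize a freshly allocated association from the endpoint's defaults,
 * pick verification tags and the initial TSN, and allocate the outgoing
 * stream and TSN mapping arrays. Returns 0 on success or ENOMEM.
 */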
942 int
943 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
944     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
945 {
946 	struct sctp_association *asoc;
947 
948 	/*
949 	 * Anything set to zero is taken care of by the allocation routine's
950 	 * bzero
951 	 */
952 
953 	/*
954 	 * Up front, select what scoping to apply to the addresses I tell my
955 	 * peer. Not sure what to do with these right now; we will need to come up
956 	 * with a way to set them. We may need to pass them through from the
957 	 * caller in the sctp_aloc_assoc() function.
958 	 */
959 	int i;
960 
961 #if defined(SCTP_DETAILED_STR_STATS)
962 	int j;
963 
964 #endif
965 
966 	asoc = &stcb->asoc;
967 	/* init all variables to a known value. */
968 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
969 	asoc->max_burst = inp->sctp_ep.max_burst;
970 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
971 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
972 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
973 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
974 	asoc->ecn_supported = inp->ecn_supported;
975 	asoc->prsctp_supported = inp->prsctp_supported;
976 	asoc->idata_supported = inp->idata_supported;
977 	asoc->auth_supported = inp->auth_supported;
978 	asoc->asconf_supported = inp->asconf_supported;
979 	asoc->reconfig_supported = inp->reconfig_supported;
980 	asoc->nrsack_supported = inp->nrsack_supported;
981 	asoc->pktdrop_supported = inp->pktdrop_supported;
983 	asoc->sctp_cmt_pf = (uint8_t) 0;
984 	asoc->sctp_frag_point = inp->sctp_frag_point;
985 	asoc->sctp_features = inp->sctp_features;
986 	asoc->default_dscp = inp->sctp_ep.default_dscp;
987 	asoc->max_cwnd = inp->max_cwnd;
988 #ifdef INET6
989 	if (inp->sctp_ep.default_flowlabel) {
990 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
991 	} else {
992 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
993 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
994 			asoc->default_flowlabel &= 0x000fffff;
995 			asoc->default_flowlabel |= 0x80000000;
996 		} else {
997 			asoc->default_flowlabel = 0;
998 		}
999 	}
1000 #endif
1001 	asoc->sb_send_resv = 0;
1002 	if (override_tag) {
1003 		asoc->my_vtag = override_tag;
1004 	} else {
1005 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1006 	}
1007 	/* Get the nonce tags */
1008 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1009 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1010 	asoc->vrf_id = vrf_id;
1011 
1012 #ifdef SCTP_ASOCLOG_OF_TSNS
1013 	asoc->tsn_in_at = 0;
1014 	asoc->tsn_out_at = 0;
1015 	asoc->tsn_in_wrapped = 0;
1016 	asoc->tsn_out_wrapped = 0;
1017 	asoc->cumack_log_at = 0;
1018 	asoc->cumack_log_atsnt = 0;
1019 #endif
1020 #ifdef SCTP_FS_SPEC_LOG
1021 	asoc->fs_index = 0;
1022 #endif
1023 	asoc->refcnt = 0;
1024 	asoc->assoc_up_sent = 0;
1025 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1026 	    sctp_select_initial_TSN(&inp->sctp_ep);
1027 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1028 	/* we are optimistic here */
1029 	asoc->peer_supports_nat = 0;
1030 	asoc->sent_queue_retran_cnt = 0;
1031 
1032 	/* for CMT */
1033 	asoc->last_net_cmt_send_started = NULL;
1034 
1035 	/* This will need to be adjusted */
1036 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1037 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1038 	asoc->asconf_seq_in = asoc->last_acked_seq;
1039 
1040 	/* here we are different, we hold the next one we expect */
1041 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1042 
1043 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1044 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1045 
1046 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1047 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1048 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1049 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1050 	asoc->free_chunk_cnt = 0;
1051 
1052 	asoc->iam_blocking = 0;
1053 	asoc->context = inp->sctp_context;
1054 	asoc->local_strreset_support = inp->local_strreset_support;
1055 	asoc->def_send = inp->def_send;
1056 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1057 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1058 	asoc->pr_sctp_cnt = 0;
1059 	asoc->total_output_queue_size = 0;
1060 
1061 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1062 		asoc->scope.ipv6_addr_legal = 1;
1063 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1064 			asoc->scope.ipv4_addr_legal = 1;
1065 		} else {
1066 			asoc->scope.ipv4_addr_legal = 0;
1067 		}
1068 	} else {
1069 		asoc->scope.ipv6_addr_legal = 0;
1070 		asoc->scope.ipv4_addr_legal = 1;
1071 	}
1072 
1073 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1074 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1075 
1076 	asoc->smallest_mtu = inp->sctp_frag_point;
1077 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1078 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1079 
1080 	asoc->locked_on_sending = NULL;
1081 	asoc->stream_locked_on = 0;
1082 	asoc->ecn_echo_cnt_onq = 0;
1083 	asoc->stream_locked = 0;
1084 
1085 	asoc->send_sack = 1;
1086 
1087 	LIST_INIT(&asoc->sctp_restricted_addrs);
1088 
1089 	TAILQ_INIT(&asoc->nets);
1090 	TAILQ_INIT(&asoc->pending_reply_queue);
1091 	TAILQ_INIT(&asoc->asconf_ack_sent);
1092 	/* Set up to fill the hb random cache at the first HB */
1093 	asoc->hb_random_idx = 4;
1094 
1095 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1096 
1097 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1098 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1099 
1100 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1101 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1102 
1103 	/*
1104 	 * Now the stream parameters, here we allocate space for all streams
1105 	 * that we request by default.
1106 	 */
1107 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1108 	    o_strms;
1109 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1110 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1111 	    SCTP_M_STRMO);
1112 	if (asoc->strmout == NULL) {
1113 		/* big trouble no memory */
1114 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1115 		return (ENOMEM);
1116 	}
1117 	for (i = 0; i < asoc->streamoutcnt; i++) {
1118 		/*
1119 		 * The inbound side must be set to 0xffff. Also NOTE: when we get
1120 		 * the INIT-ACK back (for the INIT sender) we MUST reduce the
1121 		 * count (streamoutcnt), but first check whether we sent to any of
1122 		 * the upper streams that were dropped (if some were). Those
1123 		 * that were dropped must be reported to the upper layer as
1124 		 * failed to send.
1125 		 */
1126 		asoc->strmout[i].next_mid_ordered = 0;
1127 		asoc->strmout[i].next_mid_unordered = 0;
1128 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1129 		asoc->strmout[i].chunks_on_queues = 0;
1130 #if defined(SCTP_DETAILED_STR_STATS)
1131 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1132 			asoc->strmout[i].abandoned_sent[j] = 0;
1133 			asoc->strmout[i].abandoned_unsent[j] = 0;
1134 		}
1135 #else
1136 		asoc->strmout[i].abandoned_sent[0] = 0;
1137 		asoc->strmout[i].abandoned_unsent[0] = 0;
1138 #endif
1139 		asoc->strmout[i].stream_no = i;
1140 		asoc->strmout[i].last_msg_incomplete = 0;
1141 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1142 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1143 	}
1144 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1145 
1146 	/* Now the mapping array */
1147 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1148 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1149 	    SCTP_M_MAP);
1150 	if (asoc->mapping_array == NULL) {
1151 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1152 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1153 		return (ENOMEM);
1154 	}
1155 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1156 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1157 	    SCTP_M_MAP);
1158 	if (asoc->nr_mapping_array == NULL) {
1159 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1160 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1161 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1162 		return (ENOMEM);
1163 	}
1164 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1165 
1166 	/* Now the init of the other outqueues */
1167 	TAILQ_INIT(&asoc->free_chunks);
1168 	TAILQ_INIT(&asoc->control_send_queue);
1169 	TAILQ_INIT(&asoc->asconf_send_queue);
1170 	TAILQ_INIT(&asoc->send_queue);
1171 	TAILQ_INIT(&asoc->sent_queue);
1172 	TAILQ_INIT(&asoc->resetHead);
1173 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1174 	TAILQ_INIT(&asoc->asconf_queue);
1175 	/* authentication fields */
1176 	asoc->authinfo.random = NULL;
1177 	asoc->authinfo.active_keyid = 0;
1178 	asoc->authinfo.assoc_key = NULL;
1179 	asoc->authinfo.assoc_keyid = 0;
1180 	asoc->authinfo.recv_key = NULL;
1181 	asoc->authinfo.recv_keyid = 0;
1182 	LIST_INIT(&asoc->shared_keys);
1183 	asoc->marked_retrans = 0;
1184 	asoc->port = inp->sctp_ep.port;
1185 	asoc->timoinit = 0;
1186 	asoc->timodata = 0;
1187 	asoc->timosack = 0;
1188 	asoc->timoshutdown = 0;
1189 	asoc->timoheartbeat = 0;
1190 	asoc->timocookie = 0;
1191 	asoc->timoshutdownack = 0;
1192 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1193 	asoc->discontinuity_time = asoc->start_time;
1194 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1195 		asoc->abandoned_unsent[i] = 0;
1196 		asoc->abandoned_sent[i] = 0;
1197 	}
1198 	/*
1199 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1200 	 * freed later when the association is freed.
1201 	 */
1202 	return (0);
1203 }
1204 
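/*
 * Debug helper: dump both the renegable and non-renegable mapping arrays,
 * omitting any trailing all-zero bytes.
 */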
1205 void
1206 sctp_print_mapping_array(struct sctp_association *asoc)
1207 {
1208 	unsigned int i, limit;
1209 
1210 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1211 	    asoc->mapping_array_size,
1212 	    asoc->mapping_array_base_tsn,
1213 	    asoc->cumulative_tsn,
1214 	    asoc->highest_tsn_inside_map,
1215 	    asoc->highest_tsn_inside_nr_map);
1216 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1217 		if (asoc->mapping_array[limit - 1] != 0) {
1218 			break;
1219 		}
1220 	}
1221 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1222 	for (i = 0; i < limit; i++) {
1223 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1224 	}
1225 	if (limit % 16)
1226 		SCTP_PRINTF("\n");
1227 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1228 		if (asoc->nr_mapping_array[limit - 1]) {
1229 			break;
1230 		}
1231 	}
1232 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1233 	for (i = 0; i < limit; i++) {
1234 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1235 	}
1236 	if (limit % 16)
1237 		SCTP_PRINTF("\n");
1238 }
1239 
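/*
 * Grow both mapping arrays in one step so they always stay the same size.
 * The new size is the old size plus the bytes needed for 'needed' more
 * bits, rounded up, plus SCTP_MAPPING_ARRAY_INCR slack.
 */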
1240 int
1241 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1242 {
1243 	/* mapping array needs to grow */
1244 	uint8_t *new_array1, *new_array2;
1245 	uint32_t new_size;
1246 
1247 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1248 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1249 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1250 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1251 		/* can't get more, forget it */
1252 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1253 		if (new_array1) {
1254 			SCTP_FREE(new_array1, SCTP_M_MAP);
1255 		}
1256 		if (new_array2) {
1257 			SCTP_FREE(new_array2, SCTP_M_MAP);
1258 		}
1259 		return (-1);
1260 	}
1261 	memset(new_array1, 0, new_size);
1262 	memset(new_array2, 0, new_size);
1263 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1264 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1265 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1266 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1267 	asoc->mapping_array = new_array1;
1268 	asoc->nr_mapping_array = new_array2;
1269 	asoc->mapping_array_size = new_size;
1270 	return (0);
1271 }
1272 
1273 
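/*
 * Walk the endpoints (and their associations) that match the iterator's
 * pcb flags/features and association state, invoking the registered
 * callbacks. The INP info lock is periodically dropped and re-acquired so
 * that other threads are not starved during long walks.
 */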
1274 static void
1275 sctp_iterator_work(struct sctp_iterator *it)
1276 {
1277 	int iteration_count = 0;
1278 	int inp_skip = 0;
1279 	int first_in = 1;
1280 	struct sctp_inpcb *tinp;
1281 
1282 	SCTP_INP_INFO_RLOCK();
1283 	SCTP_ITERATOR_LOCK();
1284 	if (it->inp) {
1285 		SCTP_INP_RLOCK(it->inp);
1286 		SCTP_INP_DECR_REF(it->inp);
1287 	}
1288 	if (it->inp == NULL) {
1289 		/* iterator is complete */
1290 done_with_iterator:
1291 		SCTP_ITERATOR_UNLOCK();
1292 		SCTP_INP_INFO_RUNLOCK();
1293 		if (it->function_atend != NULL) {
1294 			(*it->function_atend) (it->pointer, it->val);
1295 		}
1296 		SCTP_FREE(it, SCTP_M_ITER);
1297 		return;
1298 	}
1299 select_a_new_ep:
1300 	if (first_in) {
1301 		first_in = 0;
1302 	} else {
1303 		SCTP_INP_RLOCK(it->inp);
1304 	}
1305 	while (((it->pcb_flags) &&
1306 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1307 	    ((it->pcb_features) &&
1308 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1309 		/* endpoint flags or features don't match, so keep looking */
1310 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1311 			SCTP_INP_RUNLOCK(it->inp);
1312 			goto done_with_iterator;
1313 		}
1314 		tinp = it->inp;
1315 		it->inp = LIST_NEXT(it->inp, sctp_list);
1316 		SCTP_INP_RUNLOCK(tinp);
1317 		if (it->inp == NULL) {
1318 			goto done_with_iterator;
1319 		}
1320 		SCTP_INP_RLOCK(it->inp);
1321 	}
1322 	/* now go through each assoc which is in the desired state */
1323 	if (it->done_current_ep == 0) {
1324 		if (it->function_inp != NULL)
1325 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1326 		it->done_current_ep = 1;
1327 	}
1328 	if (it->stcb == NULL) {
1329 		/* run the per instance function */
1330 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1331 	}
1332 	if ((inp_skip) || it->stcb == NULL) {
1333 		if (it->function_inp_end != NULL) {
1334 			inp_skip = (*it->function_inp_end) (it->inp,
1335 			    it->pointer,
1336 			    it->val);
1337 		}
1338 		SCTP_INP_RUNLOCK(it->inp);
1339 		goto no_stcb;
1340 	}
1341 	while (it->stcb) {
1342 		SCTP_TCB_LOCK(it->stcb);
1343 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1344 			/* not in the right state... keep looking */
1345 			SCTP_TCB_UNLOCK(it->stcb);
1346 			goto next_assoc;
1347 		}
1348 		/* see if we have limited out the iterator loop */
1349 		iteration_count++;
1350 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1351 			/* Pause to let others grab the lock */
1352 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1353 			SCTP_TCB_UNLOCK(it->stcb);
1354 			SCTP_INP_INCR_REF(it->inp);
1355 			SCTP_INP_RUNLOCK(it->inp);
1356 			SCTP_ITERATOR_UNLOCK();
1357 			SCTP_INP_INFO_RUNLOCK();
1358 			SCTP_INP_INFO_RLOCK();
1359 			SCTP_ITERATOR_LOCK();
1360 			if (sctp_it_ctl.iterator_flags) {
1361 				/* We won't be staying here */
1362 				SCTP_INP_DECR_REF(it->inp);
1363 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1364 				if (sctp_it_ctl.iterator_flags &
1365 				    SCTP_ITERATOR_STOP_CUR_IT) {
1366 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1367 					goto done_with_iterator;
1368 				}
1369 				if (sctp_it_ctl.iterator_flags &
1370 				    SCTP_ITERATOR_STOP_CUR_INP) {
1371 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1372 					goto no_stcb;
1373 				}
1374 				/* If we reach here huh? */
1375 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1376 				    sctp_it_ctl.iterator_flags);
1377 				sctp_it_ctl.iterator_flags = 0;
1378 			}
1379 			SCTP_INP_RLOCK(it->inp);
1380 			SCTP_INP_DECR_REF(it->inp);
1381 			SCTP_TCB_LOCK(it->stcb);
1382 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1383 			iteration_count = 0;
1384 		}
1385 		/* run function on this one */
1386 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1387 
1388 		/*
1389 		 * we lie here, it really needs to have its own type but
1390 		 * first I must verify that this won't affect things :-0
1391 		 */
1392 		if (it->no_chunk_output == 0)
1393 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1394 
1395 		SCTP_TCB_UNLOCK(it->stcb);
1396 next_assoc:
1397 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1398 		if (it->stcb == NULL) {
1399 			/* Run last function */
1400 			if (it->function_inp_end != NULL) {
1401 				inp_skip = (*it->function_inp_end) (it->inp,
1402 				    it->pointer,
1403 				    it->val);
1404 			}
1405 		}
1406 	}
1407 	SCTP_INP_RUNLOCK(it->inp);
1408 no_stcb:
1409 	/* done with all assocs on this endpoint, move on to next endpoint */
1410 	it->done_current_ep = 0;
1411 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1412 		it->inp = NULL;
1413 	} else {
1414 		it->inp = LIST_NEXT(it->inp, sctp_list);
1415 	}
1416 	if (it->inp == NULL) {
1417 		goto done_with_iterator;
1418 	}
1419 	goto select_a_new_ep;
1420 }
1421 
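/*
 * Run all queued iterators. Called with the iterator work-queue lock held;
 * the lock is dropped around each individual iterator run.
 */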
1422 void
1423 sctp_iterator_worker(void)
1424 {
1425 	struct sctp_iterator *it, *nit;
1426 
1427 	/* This function is called with the WQ lock in place */
1428 
1429 	sctp_it_ctl.iterator_running = 1;
1430 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1431 		sctp_it_ctl.cur_it = it;
1432 		/* now lets work on this one */
1433 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1434 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1435 		CURVNET_SET(it->vn);
1436 		sctp_iterator_work(it);
1437 		sctp_it_ctl.cur_it = NULL;
1438 		CURVNET_RESTORE();
1439 		SCTP_IPI_ITERATOR_WQ_LOCK();
1440 		/* sa_ignore FREED_MEMORY */
1441 	}
1442 	sctp_it_ctl.iterator_running = 0;
1443 	return;
1444 }
1445 
1446 
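/*
 * Drain the global address work queue (fed by routing-socket events) into
 * an asconf iterator, which then walks all bound-all endpoints to process
 * the pending address changes.
 */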
1447 static void
1448 sctp_handle_addr_wq(void)
1449 {
1450 	/* deal with the ADDR wq from the rtsock calls */
1451 	struct sctp_laddr *wi, *nwi;
1452 	struct sctp_asconf_iterator *asc;
1453 
1454 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1455 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1456 	if (asc == NULL) {
1457 		/* Try later, no memory */
1458 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1459 		    (struct sctp_inpcb *)NULL,
1460 		    (struct sctp_tcb *)NULL,
1461 		    (struct sctp_nets *)NULL);
1462 		return;
1463 	}
1464 	LIST_INIT(&asc->list_of_work);
1465 	asc->cnt = 0;
1466 
1467 	SCTP_WQ_ADDR_LOCK();
1468 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1469 		LIST_REMOVE(wi, sctp_nxt_addr);
1470 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1471 		asc->cnt++;
1472 	}
1473 	SCTP_WQ_ADDR_UNLOCK();
1474 
1475 	if (asc->cnt == 0) {
1476 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1477 	} else {
1478 		int ret;
1479 
1480 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1481 		    sctp_asconf_iterator_stcb,
1482 		    NULL,	/* No ep end for boundall */
1483 		    SCTP_PCB_FLAGS_BOUNDALL,
1484 		    SCTP_PCB_ANY_FEATURES,
1485 		    SCTP_ASOC_ANY_STATE,
1486 		    (void *)asc, 0,
1487 		    sctp_asconf_iterator_end, NULL, 0);
1488 		if (ret) {
1489 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1490 			/*
1491 			 * Free it if we are stopping, otherwise put it back
1492 			 * on the addr_wq.
1493 			 */
1494 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1495 				sctp_asconf_iterator_end(asc, 0);
1496 			} else {
1497 				SCTP_WQ_ADDR_LOCK();
1498 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1499 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1500 				}
1501 				SCTP_WQ_ADDR_UNLOCK();
1502 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1503 			}
1504 		}
1505 	}
1506 }
1507 
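/*
 * Common callout handler for every SCTP timer type. After validating that
 * the timer, endpoint, and association are still alive, it dispatches on
 * tmr->type; most cases then call sctp_chunk_output().
 */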
1508 void
1509 sctp_timeout_handler(void *t)
1510 {
1511 	struct sctp_inpcb *inp;
1512 	struct sctp_tcb *stcb;
1513 	struct sctp_nets *net;
1514 	struct sctp_timer *tmr;
1515 	struct mbuf *op_err;
1516 
1517 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1518 	struct socket *so;
1519 
1520 #endif
1521 	int did_output;
1522 	int type;
1523 
1524 	tmr = (struct sctp_timer *)t;
1525 	inp = (struct sctp_inpcb *)tmr->ep;
1526 	stcb = (struct sctp_tcb *)tmr->tcb;
1527 	net = (struct sctp_nets *)tmr->net;
1528 	CURVNET_SET((struct vnet *)tmr->vnet);
1529 	did_output = 1;
1530 
1531 #ifdef SCTP_AUDITING_ENABLED
1532 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1533 	sctp_auditing(3, inp, stcb, net);
1534 #endif
1535 
1536 	/* sanity checks... */
1537 	if (tmr->self != (void *)tmr) {
1538 		/*
1539 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1540 		 * (void *)tmr);
1541 		 */
1542 		CURVNET_RESTORE();
1543 		return;
1544 	}
1545 	tmr->stopped_from = 0xa001;
1546 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1547 		/*
1548 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1549 		 * tmr->type);
1550 		 */
1551 		CURVNET_RESTORE();
1552 		return;
1553 	}
1554 	tmr->stopped_from = 0xa002;
1555 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1556 		CURVNET_RESTORE();
1557 		return;
1558 	}
1559 	/* if this is an iterator timeout, get the struct and clear inp */
1560 	tmr->stopped_from = 0xa003;
1561 	if (inp) {
1562 		SCTP_INP_INCR_REF(inp);
1563 		if ((inp->sctp_socket == NULL) &&
1564 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1565 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1566 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1567 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1568 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1569 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1570 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1571 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1572 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1573 		    ) {
1574 			SCTP_INP_DECR_REF(inp);
1575 			CURVNET_RESTORE();
1576 			return;
1577 		}
1578 	}
1579 	tmr->stopped_from = 0xa004;
1580 	if (stcb) {
1581 		atomic_add_int(&stcb->asoc.refcnt, 1);
1582 		if (stcb->asoc.state == 0) {
1583 			atomic_add_int(&stcb->asoc.refcnt, -1);
1584 			if (inp) {
1585 				SCTP_INP_DECR_REF(inp);
1586 			}
1587 			CURVNET_RESTORE();
1588 			return;
1589 		}
1590 	}
1591 	type = tmr->type;
1592 	tmr->stopped_from = 0xa005;
1593 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1594 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1595 		if (inp) {
1596 			SCTP_INP_DECR_REF(inp);
1597 		}
1598 		if (stcb) {
1599 			atomic_add_int(&stcb->asoc.refcnt, -1);
1600 		}
1601 		CURVNET_RESTORE();
1602 		return;
1603 	}
1604 	tmr->stopped_from = 0xa006;
1605 
1606 	if (stcb) {
1607 		SCTP_TCB_LOCK(stcb);
1608 		atomic_add_int(&stcb->asoc.refcnt, -1);
1609 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1610 		    ((stcb->asoc.state == 0) ||
1611 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1612 			SCTP_TCB_UNLOCK(stcb);
1613 			if (inp) {
1614 				SCTP_INP_DECR_REF(inp);
1615 			}
1616 			CURVNET_RESTORE();
1617 			return;
1618 		}
1619 	}
1620 	/* record in stopped what t-o occurred */
1621 	tmr->stopped_from = type;
1622 
1623 	/* mark as being serviced now */
1624 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1625 		/*
1626 		 * Callout has been rescheduled.
1627 		 */
1628 		goto get_out;
1629 	}
1630 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1631 		/*
1632 		 * Not active, so no action.
1633 		 */
1634 		goto get_out;
1635 	}
1636 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1637 
1638 	/* call the handler for the appropriate timer type */
1639 	switch (type) {
1640 	case SCTP_TIMER_TYPE_ZERO_COPY:
1641 		if (inp == NULL) {
1642 			break;
1643 		}
1644 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1645 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1646 		}
1647 		break;
1648 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1649 		if (inp == NULL) {
1650 			break;
1651 		}
1652 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1653 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1654 		}
1655 		break;
1656 	case SCTP_TIMER_TYPE_ADDR_WQ:
1657 		sctp_handle_addr_wq();
1658 		break;
1659 	case SCTP_TIMER_TYPE_SEND:
1660 		if ((stcb == NULL) || (inp == NULL)) {
1661 			break;
1662 		}
1663 		SCTP_STAT_INCR(sctps_timodata);
1664 		stcb->asoc.timodata++;
1665 		stcb->asoc.num_send_timers_up--;
1666 		if (stcb->asoc.num_send_timers_up < 0) {
1667 			stcb->asoc.num_send_timers_up = 0;
1668 		}
1669 		SCTP_TCB_LOCK_ASSERT(stcb);
1670 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1671 			/* no need to unlock on tcb, it's gone */
1672 
1673 			goto out_decr;
1674 		}
1675 		SCTP_TCB_LOCK_ASSERT(stcb);
1676 #ifdef SCTP_AUDITING_ENABLED
1677 		sctp_auditing(4, inp, stcb, net);
1678 #endif
1679 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1680 		if ((stcb->asoc.num_send_timers_up == 0) &&
1681 		    (stcb->asoc.sent_queue_cnt > 0)) {
1682 			struct sctp_tmit_chunk *chk;
1683 
1684 			/*
1685 			 * Safeguard: if there are chunks on the sent queue
1686 			 * but no timers running, something is
1687 			 * wrong... so we start a timer on the first chunk
1688 			 * on the sent queue on whatever net it is sent to.
1689 			 */
1690 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1691 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1692 			    chk->whoTo);
1693 		}
1694 		break;
1695 	case SCTP_TIMER_TYPE_INIT:
1696 		if ((stcb == NULL) || (inp == NULL)) {
1697 			break;
1698 		}
1699 		SCTP_STAT_INCR(sctps_timoinit);
1700 		stcb->asoc.timoinit++;
1701 		if (sctp_t1init_timer(inp, stcb, net)) {
1702 			/* no need to unlock on tcb, it's gone */
1703 			goto out_decr;
1704 		}
1705 		/* We do output but not here */
1706 		did_output = 0;
1707 		break;
1708 	case SCTP_TIMER_TYPE_RECV:
1709 		if ((stcb == NULL) || (inp == NULL)) {
1710 			break;
1711 		}
1712 		SCTP_STAT_INCR(sctps_timosack);
1713 		stcb->asoc.timosack++;
1714 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1715 #ifdef SCTP_AUDITING_ENABLED
1716 		sctp_auditing(4, inp, stcb, net);
1717 #endif
1718 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1719 		break;
1720 	case SCTP_TIMER_TYPE_SHUTDOWN:
1721 		if ((stcb == NULL) || (inp == NULL)) {
1722 			break;
1723 		}
1724 		if (sctp_shutdown_timer(inp, stcb, net)) {
1725 			/* no need to unlock on tcb, it's gone */
1726 			goto out_decr;
1727 		}
1728 		SCTP_STAT_INCR(sctps_timoshutdown);
1729 		stcb->asoc.timoshutdown++;
1730 #ifdef SCTP_AUDITING_ENABLED
1731 		sctp_auditing(4, inp, stcb, net);
1732 #endif
1733 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1734 		break;
1735 	case SCTP_TIMER_TYPE_HEARTBEAT:
1736 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1737 			break;
1738 		}
1739 		SCTP_STAT_INCR(sctps_timoheartbeat);
1740 		stcb->asoc.timoheartbeat++;
1741 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1742 			/* no need to unlock on tcb, it's gone */
1743 			goto out_decr;
1744 		}
1745 #ifdef SCTP_AUDITING_ENABLED
1746 		sctp_auditing(4, inp, stcb, net);
1747 #endif
1748 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1749 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1750 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1751 		}
1752 		break;
1753 	case SCTP_TIMER_TYPE_COOKIE:
1754 		if ((stcb == NULL) || (inp == NULL)) {
1755 			break;
1756 		}
1757 		if (sctp_cookie_timer(inp, stcb, net)) {
1758 			/* no need to unlock the tcb, it's gone */
1759 			goto out_decr;
1760 		}
1761 		SCTP_STAT_INCR(sctps_timocookie);
1762 		stcb->asoc.timocookie++;
1763 #ifdef SCTP_AUDITING_ENABLED
1764 		sctp_auditing(4, inp, stcb, net);
1765 #endif
1766 		/*
1767 		 * We treat the T3 and cookie timers as essentially the same
1768 		 * with respect to the "from" value passed to chunk_output.
1769 		 */
1770 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1771 		break;
1772 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1773 		{
1774 			struct timeval tv;
1775 			int i, secret;
1776 
1777 			if (inp == NULL) {
1778 				break;
1779 			}
1780 			SCTP_STAT_INCR(sctps_timosecret);
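			/*
			 * Rotate the endpoint's cookie secret: remember the
			 * previous secret number and refill the now-current
			 * slot with fresh random words, so cookies signed
			 * with the old secret can still be validated.
			 */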
1781 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1782 			SCTP_INP_WLOCK(inp);
1783 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1784 			inp->sctp_ep.last_secret_number =
1785 			    inp->sctp_ep.current_secret_number;
1786 			inp->sctp_ep.current_secret_number++;
1787 			if (inp->sctp_ep.current_secret_number >=
1788 			    SCTP_HOW_MANY_SECRETS) {
1789 				inp->sctp_ep.current_secret_number = 0;
1790 			}
1791 			secret = (int)inp->sctp_ep.current_secret_number;
1792 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1793 				inp->sctp_ep.secret_key[secret][i] =
1794 				    sctp_select_initial_TSN(&inp->sctp_ep);
1795 			}
1796 			SCTP_INP_WUNLOCK(inp);
1797 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1798 		}
1799 		did_output = 0;
1800 		break;
1801 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1802 		if ((stcb == NULL) || (inp == NULL)) {
1803 			break;
1804 		}
1805 		SCTP_STAT_INCR(sctps_timopathmtu);
1806 		sctp_pathmtu_timer(inp, stcb, net);
1807 		did_output = 0;
1808 		break;
1809 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1810 		if ((stcb == NULL) || (inp == NULL)) {
1811 			break;
1812 		}
1813 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1814 			/* no need to unlock the tcb, it's gone */
1815 			goto out_decr;
1816 		}
1817 		SCTP_STAT_INCR(sctps_timoshutdownack);
1818 		stcb->asoc.timoshutdownack++;
1819 #ifdef SCTP_AUDITING_ENABLED
1820 		sctp_auditing(4, inp, stcb, net);
1821 #endif
1822 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1823 		break;
1824 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1825 		if ((stcb == NULL) || (inp == NULL)) {
1826 			break;
1827 		}
1828 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1829 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1830 		    "Shutdown guard timer expired");
1831 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1832 		/* no need to unlock the tcb, it's gone */
1833 		goto out_decr;
1834 
1835 	case SCTP_TIMER_TYPE_STRRESET:
1836 		if ((stcb == NULL) || (inp == NULL)) {
1837 			break;
1838 		}
1839 		if (sctp_strreset_timer(inp, stcb, net)) {
1840 			/* no need to unlock the tcb, it's gone */
1841 			goto out_decr;
1842 		}
1843 		SCTP_STAT_INCR(sctps_timostrmrst);
1844 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1845 		break;
1846 	case SCTP_TIMER_TYPE_ASCONF:
1847 		if ((stcb == NULL) || (inp == NULL)) {
1848 			break;
1849 		}
1850 		if (sctp_asconf_timer(inp, stcb, net)) {
1851 			/* no need to unlock the tcb, it's gone */
1852 			goto out_decr;
1853 		}
1854 		SCTP_STAT_INCR(sctps_timoasconf);
1855 #ifdef SCTP_AUDITING_ENABLED
1856 		sctp_auditing(4, inp, stcb, net);
1857 #endif
1858 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1859 		break;
1860 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1861 		if ((stcb == NULL) || (inp == NULL)) {
1862 			break;
1863 		}
1864 		sctp_delete_prim_timer(inp, stcb, net);
1865 		SCTP_STAT_INCR(sctps_timodelprim);
1866 		break;
1867 
1868 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1869 		if ((stcb == NULL) || (inp == NULL)) {
1870 			break;
1871 		}
1872 		SCTP_STAT_INCR(sctps_timoautoclose);
1873 		sctp_autoclose_timer(inp, stcb, net);
1874 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1875 		did_output = 0;
1876 		break;
1877 	case SCTP_TIMER_TYPE_ASOCKILL:
1878 		if ((stcb == NULL) || (inp == NULL)) {
1879 			break;
1880 		}
1881 		SCTP_STAT_INCR(sctps_timoassockill);
1882 		/* Can we free it yet? */
1883 		SCTP_INP_DECR_REF(inp);
1884 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1885 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1886 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1887 		so = SCTP_INP_SO(inp);
1888 		atomic_add_int(&stcb->asoc.refcnt, 1);
1889 		SCTP_TCB_UNLOCK(stcb);
1890 		SCTP_SOCKET_LOCK(so, 1);
1891 		SCTP_TCB_LOCK(stcb);
1892 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1893 #endif
1894 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1895 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1896 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1897 		SCTP_SOCKET_UNLOCK(so, 1);
1898 #endif
1899 		/*
1900 		 * sctp_free_assoc() always unlocks (or destroys) the tcb, so
1901 		 * prevent a duplicate unlock or an unlock of a freed mtx.
1902 		 */
1903 		stcb = NULL;
1904 		goto out_no_decr;
1905 	case SCTP_TIMER_TYPE_INPKILL:
1906 		SCTP_STAT_INCR(sctps_timoinpkill);
1907 		if (inp == NULL) {
1908 			break;
1909 		}
1910 		/*
1911 		 * special case, take away our increment since WE are the
1912 		 * killer
1913 		 */
1914 		SCTP_INP_DECR_REF(inp);
1915 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1916 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1917 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1918 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1919 		inp = NULL;
1920 		goto out_no_decr;
1921 	default:
1922 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1923 		    type);
1924 		break;
1925 	}
1926 #ifdef SCTP_AUDITING_ENABLED
1927 	sctp_audit_log(0xF1, (uint8_t) type);
1928 	if (inp)
1929 		sctp_auditing(5, inp, stcb, net);
1930 #endif
1931 	if ((did_output) && stcb) {
1932 		/*
1933 		 * Now we need to clean up the control chunk chain if an
1934 		 * ECNE is on it. It must be marked as UNSENT again so the
1935 		 * next call will continue to send it until we get a CWR
1936 		 * to remove it. It is, however, unlikely that we will
1937 		 * find an ECN echo on the chain.
1938 		 */
1939 		sctp_fix_ecn_echo(&stcb->asoc);
1940 	}
1941 get_out:
1942 	if (stcb) {
1943 		SCTP_TCB_UNLOCK(stcb);
1944 	}
1945 out_decr:
1946 	if (inp) {
1947 		SCTP_INP_DECR_REF(inp);
1948 	}
1949 out_no_decr:
1950 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
1951 	CURVNET_RESTORE();
1952 }
1953 
1954 void
1955 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1956     struct sctp_nets *net)
1957 {
1958 	uint32_t to_ticks;
1959 	struct sctp_timer *tmr;
1960 
1961 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1962 		return;
1963 
1964 	tmr = NULL;
1965 	if (stcb) {
1966 		SCTP_TCB_LOCK_ASSERT(stcb);
1967 	}
1968 	switch (t_type) {
1969 	case SCTP_TIMER_TYPE_ZERO_COPY:
1970 		tmr = &inp->sctp_ep.zero_copy_timer;
1971 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1972 		break;
1973 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1974 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1975 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1976 		break;
1977 	case SCTP_TIMER_TYPE_ADDR_WQ:
1978 		/* Only 1 tick away :-) */
1979 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1980 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1981 		break;
1982 	case SCTP_TIMER_TYPE_SEND:
1983 		/* Here we use the RTO timer */
1984 		{
1985 			int rto_val;
1986 
1987 			if ((stcb == NULL) || (net == NULL)) {
1988 				return;
1989 			}
1990 			tmr = &net->rxt_timer;
1991 			if (net->RTO == 0) {
1992 				rto_val = stcb->asoc.initial_rto;
1993 			} else {
1994 				rto_val = net->RTO;
1995 			}
1996 			to_ticks = MSEC_TO_TICKS(rto_val);
1997 		}
1998 		break;
1999 	case SCTP_TIMER_TYPE_INIT:
2000 		/*
2001 		 * Here we use the INIT timer default, usually about one
2002 		 * minute.
2003 		 */
2004 		if ((stcb == NULL) || (net == NULL)) {
2005 			return;
2006 		}
2007 		tmr = &net->rxt_timer;
2008 		if (net->RTO == 0) {
2009 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2010 		} else {
2011 			to_ticks = MSEC_TO_TICKS(net->RTO);
2012 		}
2013 		break;
2014 	case SCTP_TIMER_TYPE_RECV:
2015 		/*
2016 		 * Here we use the delayed-ACK timer value from the inp,
2017 		 * usually about 200 ms.
2018 		 */
2019 		if (stcb == NULL) {
2020 			return;
2021 		}
2022 		tmr = &stcb->asoc.dack_timer;
2023 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2024 		break;
2025 	case SCTP_TIMER_TYPE_SHUTDOWN:
2026 		/* Here we use the RTO of the destination. */
2027 		if ((stcb == NULL) || (net == NULL)) {
2028 			return;
2029 		}
2030 		if (net->RTO == 0) {
2031 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2032 		} else {
2033 			to_ticks = MSEC_TO_TICKS(net->RTO);
2034 		}
2035 		tmr = &net->rxt_timer;
2036 		break;
2037 	case SCTP_TIMER_TYPE_HEARTBEAT:
2038 		/*
2039 		 * The net is used here so that we can add in the RTO, even
2040 		 * though a different timer is used. The delay is the RTO
2041 		 * with a random jitter applied, plus the HB interval.
2042 		 */
2043 		if ((stcb == NULL) || (net == NULL)) {
2044 			return;
2045 		} else {
2046 			uint32_t rndval;
2047 			uint32_t jitter;
2048 
2049 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2050 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2051 				return;
2052 			}
2053 			if (net->RTO == 0) {
2054 				to_ticks = stcb->asoc.initial_rto;
2055 			} else {
2056 				to_ticks = net->RTO;
2057 			}
2058 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2059 			jitter = rndval % to_ticks;
2060 			if (jitter >= (to_ticks >> 1)) {
2061 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2062 			} else {
2063 				to_ticks = to_ticks - jitter;
2064 			}
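			/*
			 * A small worked example (numbers assumed, not from
			 * the code): with RTO = 1000 ms, jitter is
			 * rndval % 1000.  A jitter of 300 yields 700 ms and
			 * a jitter of 700 yields 1200 ms, so the delay ends
			 * up spread roughly uniformly over [RTO/2, 3*RTO/2).
			 */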
2065 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2066 			    !(net->dest_state & SCTP_ADDR_PF)) {
2067 				to_ticks += net->heart_beat_delay;
2068 			}
2069 			/*
2070 			 * to_ticks currently holds milliseconds; convert it
2071 			 * to ticks.
2072 			 */
2073 			to_ticks = MSEC_TO_TICKS(to_ticks);
2074 			tmr = &net->hb_timer;
2075 		}
2076 		break;
2077 	case SCTP_TIMER_TYPE_COOKIE:
2078 		/*
2079 		 * Here we can use the RTO timer from the network since one
2080 		 * RTT is complete. If a retransmission happened, then we
2081 		 * will be using the initial RTO value.
2082 		 */
2083 		if ((stcb == NULL) || (net == NULL)) {
2084 			return;
2085 		}
2086 		if (net->RTO == 0) {
2087 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2088 		} else {
2089 			to_ticks = MSEC_TO_TICKS(net->RTO);
2090 		}
2091 		tmr = &net->rxt_timer;
2092 		break;
2093 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2094 		/*
2095 		 * Nothing needed but the endpoint here; the timeout is
2096 		 * usually about 60 minutes.
2097 		 */
2098 		tmr = &inp->sctp_ep.signature_change;
2099 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2100 		break;
2101 	case SCTP_TIMER_TYPE_ASOCKILL:
2102 		if (stcb == NULL) {
2103 			return;
2104 		}
2105 		tmr = &stcb->asoc.strreset_timer;
2106 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2107 		break;
2108 	case SCTP_TIMER_TYPE_INPKILL:
2109 		/*
2110 		 * The inp is set up to die. We re-use the signature_change
2111 		 * timer since that has stopped and we are in the GONE
2112 		 * state.
2113 		 */
2114 		tmr = &inp->sctp_ep.signature_change;
2115 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2116 		break;
2117 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2118 		/*
2119 		 * Here we use the value found in the EP for PMTU, usually
2120 		 * about 10 minutes.
2121 		 */
2122 		if ((stcb == NULL) || (net == NULL)) {
2123 			return;
2124 		}
2125 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2126 			return;
2127 		}
2128 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2129 		tmr = &net->pmtu_timer;
2130 		break;
2131 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2132 		/* Here we use the RTO of the destination */
2133 		if ((stcb == NULL) || (net == NULL)) {
2134 			return;
2135 		}
2136 		if (net->RTO == 0) {
2137 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2138 		} else {
2139 			to_ticks = MSEC_TO_TICKS(net->RTO);
2140 		}
2141 		tmr = &net->rxt_timer;
2142 		break;
2143 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2144 		/*
2145 		 * Here we use the endpoint's shutdown guard timer, usually
2146 		 * about 3 minutes.
2147 		 */
2148 		if (stcb == NULL) {
2149 			return;
2150 		}
2151 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2152 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2153 		} else {
2154 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2155 		}
2156 		tmr = &stcb->asoc.shut_guard_timer;
2157 		break;
2158 	case SCTP_TIMER_TYPE_STRRESET:
2159 		/*
2160 		 * Here the timer comes from the stcb but its value is from
2161 		 * the net's RTO.
2162 		 */
2163 		if ((stcb == NULL) || (net == NULL)) {
2164 			return;
2165 		}
2166 		if (net->RTO == 0) {
2167 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2168 		} else {
2169 			to_ticks = MSEC_TO_TICKS(net->RTO);
2170 		}
2171 		tmr = &stcb->asoc.strreset_timer;
2172 		break;
2173 	case SCTP_TIMER_TYPE_ASCONF:
2174 		/*
2175 		 * Here the timer comes from the stcb but its value is from
2176 		 * the net's RTO.
2177 		 */
2178 		if ((stcb == NULL) || (net == NULL)) {
2179 			return;
2180 		}
2181 		if (net->RTO == 0) {
2182 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2183 		} else {
2184 			to_ticks = MSEC_TO_TICKS(net->RTO);
2185 		}
2186 		tmr = &stcb->asoc.asconf_timer;
2187 		break;
2188 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2189 		if ((stcb == NULL) || (net != NULL)) {
2190 			return;
2191 		}
2192 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2193 		tmr = &stcb->asoc.delete_prim_timer;
2194 		break;
2195 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2196 		if (stcb == NULL) {
2197 			return;
2198 		}
2199 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2200 			/*
2201 			 * Really an error since stcb is NOT set to
2202 			 * autoclose
2203 			 */
2204 			return;
2205 		}
2206 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2207 		tmr = &stcb->asoc.autoclose_timer;
2208 		break;
2209 	default:
2210 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2211 		    __func__, t_type);
2212 		return;
2213 		break;
2214 	}
2215 	if ((to_ticks <= 0) || (tmr == NULL)) {
2216 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2217 		    __func__, t_type, to_ticks, (void *)tmr);
2218 		return;
2219 	}
2220 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2221 		/*
2222 		 * We do NOT allow the timer to already be running. If it
2223 		 * is, we leave the current one up unchanged.
2224 		 */
2225 		return;
2226 	}
2227 	/* At this point we can proceed */
2228 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2229 		stcb->asoc.num_send_timers_up++;
2230 	}
2231 	tmr->stopped_from = 0;
2232 	tmr->type = t_type;
2233 	tmr->ep = (void *)inp;
2234 	tmr->tcb = (void *)stcb;
2235 	tmr->net = (void *)net;
2236 	tmr->self = (void *)tmr;
2237 	tmr->vnet = (void *)curvnet;
2238 	tmr->ticks = sctp_get_tick_count();
2239 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2240 	return;
2241 }
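/*
 * A minimal usage sketch (mirroring the T3 safeguard in the timeout handler
 * above): re-arm the retransmission timer for whatever destination the first
 * chunk on the sent queue was sent to.
 *
 *	chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
 *	if (chk != NULL)
 *		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, chk->whoTo);
 */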
2242 
2243 void
2244 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2245     struct sctp_nets *net, uint32_t from)
2246 {
2247 	struct sctp_timer *tmr;
2248 
2249 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2250 	    (inp == NULL))
2251 		return;
2252 
2253 	tmr = NULL;
2254 	if (stcb) {
2255 		SCTP_TCB_LOCK_ASSERT(stcb);
2256 	}
2257 	switch (t_type) {
2258 	case SCTP_TIMER_TYPE_ZERO_COPY:
2259 		tmr = &inp->sctp_ep.zero_copy_timer;
2260 		break;
2261 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2262 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2263 		break;
2264 	case SCTP_TIMER_TYPE_ADDR_WQ:
2265 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2266 		break;
2267 	case SCTP_TIMER_TYPE_SEND:
2268 		if ((stcb == NULL) || (net == NULL)) {
2269 			return;
2270 		}
2271 		tmr = &net->rxt_timer;
2272 		break;
2273 	case SCTP_TIMER_TYPE_INIT:
2274 		if ((stcb == NULL) || (net == NULL)) {
2275 			return;
2276 		}
2277 		tmr = &net->rxt_timer;
2278 		break;
2279 	case SCTP_TIMER_TYPE_RECV:
2280 		if (stcb == NULL) {
2281 			return;
2282 		}
2283 		tmr = &stcb->asoc.dack_timer;
2284 		break;
2285 	case SCTP_TIMER_TYPE_SHUTDOWN:
2286 		if ((stcb == NULL) || (net == NULL)) {
2287 			return;
2288 		}
2289 		tmr = &net->rxt_timer;
2290 		break;
2291 	case SCTP_TIMER_TYPE_HEARTBEAT:
2292 		if ((stcb == NULL) || (net == NULL)) {
2293 			return;
2294 		}
2295 		tmr = &net->hb_timer;
2296 		break;
2297 	case SCTP_TIMER_TYPE_COOKIE:
2298 		if ((stcb == NULL) || (net == NULL)) {
2299 			return;
2300 		}
2301 		tmr = &net->rxt_timer;
2302 		break;
2303 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2304 		/* nothing needed but the endpoint here */
2305 		tmr = &inp->sctp_ep.signature_change;
2306 		/*
2307 		 * We re-use the newcookie timer for the INP kill timer. We
2308 		 * must ensure that we do not stop it by accident.
2309 		 */
2310 		break;
2311 	case SCTP_TIMER_TYPE_ASOCKILL:
2312 		/*
2313 		 * Stop the asoc kill timer.
2314 		 */
2315 		if (stcb == NULL) {
2316 			return;
2317 		}
2318 		tmr = &stcb->asoc.strreset_timer;
2319 		break;
2320 
2321 	case SCTP_TIMER_TYPE_INPKILL:
2322 		/*
2323 		 * The inp is set up to die. We re-use the signature_change
2324 		 * timer since that has stopped and we are in the GONE
2325 		 * state.
2326 		 */
2327 		tmr = &inp->sctp_ep.signature_change;
2328 		break;
2329 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2330 		if ((stcb == NULL) || (net == NULL)) {
2331 			return;
2332 		}
2333 		tmr = &net->pmtu_timer;
2334 		break;
2335 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2336 		if ((stcb == NULL) || (net == NULL)) {
2337 			return;
2338 		}
2339 		tmr = &net->rxt_timer;
2340 		break;
2341 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2342 		if (stcb == NULL) {
2343 			return;
2344 		}
2345 		tmr = &stcb->asoc.shut_guard_timer;
2346 		break;
2347 	case SCTP_TIMER_TYPE_STRRESET:
2348 		if (stcb == NULL) {
2349 			return;
2350 		}
2351 		tmr = &stcb->asoc.strreset_timer;
2352 		break;
2353 	case SCTP_TIMER_TYPE_ASCONF:
2354 		if (stcb == NULL) {
2355 			return;
2356 		}
2357 		tmr = &stcb->asoc.asconf_timer;
2358 		break;
2359 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2360 		if (stcb == NULL) {
2361 			return;
2362 		}
2363 		tmr = &stcb->asoc.delete_prim_timer;
2364 		break;
2365 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2366 		if (stcb == NULL) {
2367 			return;
2368 		}
2369 		tmr = &stcb->asoc.autoclose_timer;
2370 		break;
2371 	default:
2372 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2373 		    __func__, t_type);
2374 		break;
2375 	}
2376 	if (tmr == NULL) {
2377 		return;
2378 	}
2379 	if ((tmr->type != t_type) && tmr->type) {
2380 		/*
2381 		 * OK, we have a timer that is under joint use; the cookie
2382 		 * timer, perhaps, sharing storage with the SEND timer. We
2383 		 * are therefore NOT running the timer that the caller wants
2384 		 * stopped, so just return.
2385 		 */
2386 		return;
2387 	}
2388 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2389 		stcb->asoc.num_send_timers_up--;
2390 		if (stcb->asoc.num_send_timers_up < 0) {
2391 			stcb->asoc.num_send_timers_up = 0;
2392 		}
2393 	}
2394 	tmr->self = NULL;
2395 	tmr->stopped_from = from;
2396 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2397 	return;
2398 }
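/*
 * A minimal usage sketch (mirroring the ASOCKILL handling in the timeout
 * handler above): the 'from' argument records, for debugging, where the stop
 * was issued.
 *
 *	sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
 *	    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
 */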
2399 
2400 uint32_t
2401 sctp_calculate_len(struct mbuf *m)
2402 {
2403 	uint32_t tlen = 0;
2404 	struct mbuf *at;
2405 
2406 	at = m;
2407 	while (at) {
2408 		tlen += SCTP_BUF_LEN(at);
2409 		at = SCTP_BUF_NEXT(at);
2410 	}
2411 	return (tlen);
2412 }
2413 
2414 void
2415 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2416     struct sctp_association *asoc, uint32_t mtu)
2417 {
2418 	/*
2419 	 * Reset the P-MTU size on this association. This involves changing
2420 	 * the asoc MTU and marking ANY chunk whose size plus overhead is
2421 	 * larger than mtu so that the DF flag may be cleared on it.
2422 	 */
2423 	struct sctp_tmit_chunk *chk;
2424 	unsigned int eff_mtu, ovh;
2425 
2426 	asoc->smallest_mtu = mtu;
2427 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2428 		ovh = SCTP_MIN_OVERHEAD;
2429 	} else {
2430 		ovh = SCTP_MIN_V4_OVERHEAD;
2431 	}
2432 	eff_mtu = mtu - ovh;
2433 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2434 		if (chk->send_size > eff_mtu) {
2435 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2436 		}
2437 	}
2438 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2439 		if (chk->send_size > eff_mtu) {
2440 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2441 		}
2442 	}
2443 }
2444 
2445 
2446 /*
2447  * Given an association and the starting time of the current RTT period,
2448  * return the RTO in milliseconds. 'net' should point to the current network.
2449  */
2450 
2451 uint32_t
2452 sctp_calculate_rto(struct sctp_tcb *stcb,
2453     struct sctp_association *asoc,
2454     struct sctp_nets *net,
2455     struct timeval *told,
2456     int safe, int rtt_from_sack)
2457 {
2458 	/*-
2459 	 * Given an association and the starting time of the current RTT
2460 	 * period (in *told), return the RTO in number of msecs.
2461 	 */
2462 	int32_t rtt;		/* RTT in ms */
2463 	uint32_t new_rto;
2464 	int first_measure = 0;
2465 	struct timeval now, then, *old;
2466 
2467 	/* Copy it out for sparc64 */
2468 	if (safe == sctp_align_unsafe_makecopy) {
2469 		old = &then;
2470 		memcpy(&then, told, sizeof(struct timeval));
2471 	} else if (safe == sctp_align_safe_nocopy) {
2472 		old = told;
2473 	} else {
2474 		/* error */
2475 		SCTP_PRINTF("Huh, bad rto calc call\n");
2476 		return (0);
2477 	}
2478 	/************************/
2479 	/* 1. calculate new RTT */
2480 	/************************/
2481 	/* get the current time */
2482 	if (stcb->asoc.use_precise_time) {
2483 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2484 	} else {
2485 		(void)SCTP_GETTIME_TIMEVAL(&now);
2486 	}
2487 	timevalsub(&now, old);
2488 	/* store the current RTT in us */
2489 	net->rtt = (uint64_t) 1000000 * (uint64_t) now.tv_sec +
2490 	    (uint64_t) now.tv_usec;
2491 
2492 	/* compute rtt in ms */
2493 	rtt = (int32_t) (net->rtt / 1000);
2494 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2495 		/*
2496 		 * Tell the CC module that a new update has just occurred
2497 		 * from a sack
2498 		 */
2499 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2500 	}
2501 	/*
2502 	 * Do we need to determine the LAN type? We do this only for SACKs,
2503 	 * i.e. when RTT comes from data, not non-data (HB/INIT->INITACK).
2504 	 */
2505 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2506 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2507 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2508 			net->lan_type = SCTP_LAN_INTERNET;
2509 		} else {
2510 			net->lan_type = SCTP_LAN_LOCAL;
2511 		}
2512 	}
2513 	/***************************/
2514 	/* 2. update RTTVAR & SRTT */
2515 	/***************************/
2516 	/*-
2517 	 * Compute the scaled average lastsa and the
2518 	 * scaled variance lastsv as described in van Jacobson
2519 	 * Paper "Congestion Avoidance and Control", Annex A.
2520 	 *
2521 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2522 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2523 	 */
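	/*
	 * In conventional terms (a sketch, assuming SCTP_RTT_SHIFT == 3 and
	 * SCTP_RTT_VAR_SHIFT == 2, the values normally used), lastsa holds
	 * 8 * SRTT and lastsv holds 4 * RTTVAR, so the update below amounts to
	 *
	 *	SRTT   <- 7/8 * SRTT   + 1/8 * RTT
	 *	RTTVAR <- 3/4 * RTTVAR + 1/4 * |SRTT - RTT|
	 *
	 * and new_rto = SRTT + 4 * RTTVAR, matching RFC 4960, Section 6.3.1.
	 */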
2524 	if (net->RTO_measured) {
2525 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2526 		net->lastsa += rtt;
2527 		if (rtt < 0) {
2528 			rtt = -rtt;
2529 		}
2530 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2531 		net->lastsv += rtt;
2532 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2533 			rto_logging(net, SCTP_LOG_RTTVAR);
2534 		}
2535 	} else {
2536 		/* First RTO measurement */
2537 		net->RTO_measured = 1;
2538 		first_measure = 1;
2539 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2540 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2541 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2542 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2543 		}
2544 	}
2545 	if (net->lastsv == 0) {
2546 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2547 	}
2548 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2549 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2550 	    (stcb->asoc.sat_network_lockout == 0)) {
2551 		stcb->asoc.sat_network = 1;
2552 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2553 		stcb->asoc.sat_network = 0;
2554 		stcb->asoc.sat_network_lockout = 1;
2555 	}
2556 	/* bound it, per rules C6/C7 in RFC 4960, Section 6.3.1 */
2557 	if (new_rto < stcb->asoc.minrto) {
2558 		new_rto = stcb->asoc.minrto;
2559 	}
2560 	if (new_rto > stcb->asoc.maxrto) {
2561 		new_rto = stcb->asoc.maxrto;
2562 	}
2563 	/* we are now returning the RTO */
2564 	return (new_rto);
2565 }
2566 
2567 /*
2568  * Return a pointer to a contiguous piece of data from the given mbuf chain
2569  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2570  * one mbuf, a copy is made into 'in_ptr'.  The caller must ensure that the
2571  * buffer size is >= 'len'.  Returns NULL if there aren't 'len' bytes in the chain.
2572  */
2573 caddr_t
2574 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2575 {
2576 	uint32_t count;
2577 	uint8_t *ptr;
2578 
2579 	ptr = in_ptr;
2580 	if ((off < 0) || (len <= 0))
2581 		return (NULL);
2582 
2583 	/* find the desired start location */
2584 	while ((m != NULL) && (off > 0)) {
2585 		if (off < SCTP_BUF_LEN(m))
2586 			break;
2587 		off -= SCTP_BUF_LEN(m);
2588 		m = SCTP_BUF_NEXT(m);
2589 	}
2590 	if (m == NULL)
2591 		return (NULL);
2592 
2593 	/* is the current mbuf large enough (eg. contiguous)? */
2594 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2595 		return (mtod(m, caddr_t)+off);
2596 	} else {
2597 		/* else, it spans more than one mbuf, so save a temp copy... */
2598 		while ((m != NULL) && (len > 0)) {
2599 			count = min(SCTP_BUF_LEN(m) - off, len);
2600 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2601 			len -= count;
2602 			ptr += count;
2603 			off = 0;
2604 			m = SCTP_BUF_NEXT(m);
2605 		}
2606 		if ((m == NULL) && (len > 0))
2607 			return (NULL);
2608 		else
2609 			return ((caddr_t)in_ptr);
2610 	}
2611 }
2612 
2613 
2614 
2615 struct sctp_paramhdr *
2616 sctp_get_next_param(struct mbuf *m,
2617     int offset,
2618     struct sctp_paramhdr *pull,
2619     int pull_limit)
2620 {
2621 	/* This just provides a typed signature to Peter's Pull routine */
2622 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2623 	    (uint8_t *) pull));
2624 }
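/*
 * A typical (illustrative) way to walk the parameters of a chunk with the
 * helper above, using a small stack buffer for the header and stepping by the
 * padded parameter length:
 *
 *	struct sctp_paramhdr *phdr, param_buf;
 *	uint16_t plen;
 *
 *	phdr = sctp_get_next_param(m, offset, &param_buf, sizeof(param_buf));
 *	while (phdr != NULL) {
 *		plen = ntohs(phdr->param_length);
 *		... examine ntohs(phdr->param_type) / plen ...
 *		offset += SCTP_SIZE32(plen);
 *		phdr = sctp_get_next_param(m, offset, &param_buf,
 *		    sizeof(param_buf));
 *	}
 */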
2625 
2626 
2627 struct mbuf *
2628 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2629 {
2630 	struct mbuf *m_last;
2631 	caddr_t dp;
2632 
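	/*
	 * SCTP chunks are padded out to a 4-byte boundary, so a valid pad is
	 * 0-3 bytes; e.g. a 22-byte chunk needs (4 - (22 & 3)) & 3 = 2 pad
	 * bytes.  Anything larger is a caller error.
	 */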
2633 	if (padlen > 3) {
2634 		return (NULL);
2635 	}
2636 	if (padlen <= M_TRAILINGSPACE(m)) {
2637 		/*
2638 		 * The easy way. We hope the majority of the time we hit
2639 		 * here :)
2640 		 */
2641 		m_last = m;
2642 	} else {
2643 		/* Hard way we must grow the mbuf chain */
2644 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2645 		if (m_last == NULL) {
2646 			return (NULL);
2647 		}
2648 		SCTP_BUF_LEN(m_last) = 0;
2649 		SCTP_BUF_NEXT(m_last) = NULL;
2650 		SCTP_BUF_NEXT(m) = m_last;
2651 	}
2652 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2653 	SCTP_BUF_LEN(m_last) += padlen;
2654 	memset(dp, 0, padlen);
2655 	return (m_last);
2656 }
2657 
2658 struct mbuf *
2659 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2660 {
2661 	/* find the last mbuf in chain and pad it */
2662 	struct mbuf *m_at;
2663 
2664 	if (last_mbuf != NULL) {
2665 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2666 	} else {
2667 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2668 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2669 				return (sctp_add_pad_tombuf(m_at, padval));
2670 			}
2671 		}
2672 	}
2673 	return (NULL);
2674 }
2675 
2676 static void
2677 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2678     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2679 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2680     SCTP_UNUSED
2681 #endif
2682 )
2683 {
2684 	struct mbuf *m_notify;
2685 	struct sctp_assoc_change *sac;
2686 	struct sctp_queued_to_read *control;
2687 	unsigned int notif_len;
2688 	uint16_t abort_len;
2689 	unsigned int i;
2690 
2691 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2692 	struct socket *so;
2693 
2694 #endif
2695 
2696 	if (stcb == NULL) {
2697 		return;
2698 	}
2699 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2700 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2701 		if (abort != NULL) {
2702 			abort_len = ntohs(abort->ch.chunk_length);
2703 		} else {
2704 			abort_len = 0;
2705 		}
2706 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2707 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2708 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2709 			notif_len += abort_len;
2710 		}
2711 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2712 		if (m_notify == NULL) {
2713 			/* Retry with smaller value. */
2714 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2715 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2716 			if (m_notify == NULL) {
2717 				goto set_error;
2718 			}
2719 		}
2720 		SCTP_BUF_NEXT(m_notify) = NULL;
2721 		sac = mtod(m_notify, struct sctp_assoc_change *);
2722 		memset(sac, 0, notif_len);
2723 		sac->sac_type = SCTP_ASSOC_CHANGE;
2724 		sac->sac_flags = 0;
2725 		sac->sac_length = sizeof(struct sctp_assoc_change);
2726 		sac->sac_state = state;
2727 		sac->sac_error = error;
2728 		/* XXX verify these stream counts */
2729 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2730 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2731 		sac->sac_assoc_id = sctp_get_associd(stcb);
2732 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2733 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2734 				i = 0;
2735 				if (stcb->asoc.prsctp_supported == 1) {
2736 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2737 				}
2738 				if (stcb->asoc.auth_supported == 1) {
2739 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2740 				}
2741 				if (stcb->asoc.asconf_supported == 1) {
2742 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2743 				}
2744 				if (stcb->asoc.idata_supported == 1) {
2745 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2746 				}
2747 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2748 				if (stcb->asoc.reconfig_supported == 1) {
2749 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2750 				}
2751 				sac->sac_length += i;
2752 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2753 				memcpy(sac->sac_info, abort, abort_len);
2754 				sac->sac_length += abort_len;
2755 			}
2756 		}
2757 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2758 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2759 		    0, 0, stcb->asoc.context, 0, 0, 0,
2760 		    m_notify);
2761 		if (control != NULL) {
2762 			control->length = SCTP_BUF_LEN(m_notify);
2763 			/* not that we need this */
2764 			control->tail_mbuf = m_notify;
2765 			control->spec_flags = M_NOTIFICATION;
2766 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2767 			    control,
2768 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2769 			    so_locked);
2770 		} else {
2771 			sctp_m_freem(m_notify);
2772 		}
2773 	}
2774 	/*
2775 	 * For 1-to-1 style sockets, we send up an error when an ABORT
2776 	 * comes in.
2777 	 */
2778 set_error:
2779 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2780 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2781 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2782 		SOCK_LOCK(stcb->sctp_socket);
2783 		if (from_peer) {
2784 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2785 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2786 				stcb->sctp_socket->so_error = ECONNREFUSED;
2787 			} else {
2788 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2789 				stcb->sctp_socket->so_error = ECONNRESET;
2790 			}
2791 		} else {
2792 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2793 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2794 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2795 				stcb->sctp_socket->so_error = ETIMEDOUT;
2796 			} else {
2797 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2798 				stcb->sctp_socket->so_error = ECONNABORTED;
2799 			}
2800 		}
2801 	}
2802 	/* Wake ANY sleepers */
2803 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2804 	so = SCTP_INP_SO(stcb->sctp_ep);
2805 	if (!so_locked) {
2806 		atomic_add_int(&stcb->asoc.refcnt, 1);
2807 		SCTP_TCB_UNLOCK(stcb);
2808 		SCTP_SOCKET_LOCK(so, 1);
2809 		SCTP_TCB_LOCK(stcb);
2810 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2811 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2812 			SCTP_SOCKET_UNLOCK(so, 1);
2813 			return;
2814 		}
2815 	}
2816 #endif
2817 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2818 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2819 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2820 		socantrcvmore_locked(stcb->sctp_socket);
2821 	}
2822 	sorwakeup(stcb->sctp_socket);
2823 	sowwakeup(stcb->sctp_socket);
2824 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2825 	if (!so_locked) {
2826 		SCTP_SOCKET_UNLOCK(so, 1);
2827 	}
2828 #endif
2829 }
2830 
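/*
 * The notification helpers below all follow the same pattern: allocate an
 * mbuf, fill in the event structure, wrap it in a sctp_queued_to_read entry
 * flagged M_NOTIFICATION, and append it to the socket receive buffer with
 * sctp_add_to_readq() so the application reads it as a notification message.
 */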
2831 static void
2832 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2833     struct sockaddr *sa, uint32_t error, int so_locked
2834 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2835     SCTP_UNUSED
2836 #endif
2837 )
2838 {
2839 	struct mbuf *m_notify;
2840 	struct sctp_paddr_change *spc;
2841 	struct sctp_queued_to_read *control;
2842 
2843 	if ((stcb == NULL) ||
2844 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2845 		/* event not enabled */
2846 		return;
2847 	}
2848 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2849 	if (m_notify == NULL)
2850 		return;
2851 	SCTP_BUF_LEN(m_notify) = 0;
2852 	spc = mtod(m_notify, struct sctp_paddr_change *);
2853 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2854 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2855 	spc->spc_flags = 0;
2856 	spc->spc_length = sizeof(struct sctp_paddr_change);
2857 	switch (sa->sa_family) {
2858 #ifdef INET
2859 	case AF_INET:
2860 #ifdef INET6
2861 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2862 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2863 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2864 		} else {
2865 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2866 		}
2867 #else
2868 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2869 #endif
2870 		break;
2871 #endif
2872 #ifdef INET6
2873 	case AF_INET6:
2874 		{
2875 			struct sockaddr_in6 *sin6;
2876 
2877 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2878 
2879 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2880 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2881 				if (sin6->sin6_scope_id == 0) {
2882 					/* recover scope_id for user */
2883 					(void)sa6_recoverscope(sin6);
2884 				} else {
2885 					/* clear embedded scope_id for user */
2886 					in6_clearscope(&sin6->sin6_addr);
2887 				}
2888 			}
2889 			break;
2890 		}
2891 #endif
2892 	default:
2893 		/* TSNH */
2894 		break;
2895 	}
2896 	spc->spc_state = state;
2897 	spc->spc_error = error;
2898 	spc->spc_assoc_id = sctp_get_associd(stcb);
2899 
2900 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2901 	SCTP_BUF_NEXT(m_notify) = NULL;
2902 
2903 	/* append to socket */
2904 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2905 	    0, 0, stcb->asoc.context, 0, 0, 0,
2906 	    m_notify);
2907 	if (control == NULL) {
2908 		/* no memory */
2909 		sctp_m_freem(m_notify);
2910 		return;
2911 	}
2912 	control->length = SCTP_BUF_LEN(m_notify);
2913 	control->spec_flags = M_NOTIFICATION;
2914 	/* not that we need this */
2915 	control->tail_mbuf = m_notify;
2916 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2917 	    control,
2918 	    &stcb->sctp_socket->so_rcv, 1,
2919 	    SCTP_READ_LOCK_NOT_HELD,
2920 	    so_locked);
2921 }
2922 
2923 
2924 static void
2925 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2926     struct sctp_tmit_chunk *chk, int so_locked
2927 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2928     SCTP_UNUSED
2929 #endif
2930 )
2931 {
2932 	struct mbuf *m_notify;
2933 	struct sctp_send_failed *ssf;
2934 	struct sctp_send_failed_event *ssfe;
2935 	struct sctp_queued_to_read *control;
2936 	struct sctp_chunkhdr *chkhdr;
2937 	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
2938 
2939 	if ((stcb == NULL) ||
2940 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2941 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2942 		/* event not enabled */
2943 		return;
2944 	}
2945 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2946 		notifhdr_len = sizeof(struct sctp_send_failed_event);
2947 	} else {
2948 		notifhdr_len = sizeof(struct sctp_send_failed);
2949 	}
2950 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
2951 	if (m_notify == NULL)
2952 		/* no space left */
2953 		return;
2954 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
2955 	if (stcb->asoc.idata_supported) {
2956 		chkhdr_len = sizeof(struct sctp_idata_chunk);
2957 	} else {
2958 		chkhdr_len = sizeof(struct sctp_data_chunk);
2959 	}
2960 	/* Use some defaults in case we can't access the chunk header */
2961 	if (chk->send_size >= chkhdr_len) {
2962 		payload_len = chk->send_size - chkhdr_len;
2963 	} else {
2964 		payload_len = 0;
2965 	}
2966 	padding_len = 0;
2967 	if (chk->data != NULL) {
2968 		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
2969 		if (chkhdr != NULL) {
2970 			chk_len = ntohs(chkhdr->chunk_length);
2971 			if ((chk_len >= chkhdr_len) &&
2972 			    (chk->send_size >= chk_len) &&
2973 			    (chk->send_size - chk_len < 4)) {
2974 				padding_len = chk->send_size - chk_len;
2975 				payload_len = chk->send_size - chkhdr_len - padding_len;
2976 			}
2977 		}
2978 	}
2979 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2980 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2981 		memset(ssfe, 0, notifhdr_len);
2982 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2983 		if (sent) {
2984 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2985 		} else {
2986 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2987 		}
2988 		ssfe->ssfe_length = (uint32_t) (notifhdr_len + payload_len);
2989 		ssfe->ssfe_error = error;
2990 		/* not exactly what the user sent in, but should be close :) */
2991 		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
2992 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2993 		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
2994 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2995 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2996 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2997 	} else {
2998 		ssf = mtod(m_notify, struct sctp_send_failed *);
2999 		memset(ssf, 0, notifhdr_len);
3000 		ssf->ssf_type = SCTP_SEND_FAILED;
3001 		if (sent) {
3002 			ssf->ssf_flags = SCTP_DATA_SENT;
3003 		} else {
3004 			ssf->ssf_flags = SCTP_DATA_UNSENT;
3005 		}
3006 		ssf->ssf_length = (uint32_t) (notifhdr_len + payload_len);
3007 		ssf->ssf_error = error;
3008 		/* not exactly what the user sent in, but should be close :) */
3009 		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
3010 		ssf->ssf_info.sinfo_ssn = (uint16_t) chk->rec.data.stream_seq;
3011 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3012 		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
3013 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
3014 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3015 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3016 	}
3017 	if (chk->data != NULL) {
3018 		/* Trim off the sctp chunk header (it should be there) */
3019 		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
3020 			m_adj(chk->data, chkhdr_len);
3021 			m_adj(chk->data, -padding_len);
3022 			sctp_mbuf_crush(chk->data);
3023 			chk->send_size -= (chkhdr_len + padding_len);
3024 		}
3025 	}
3026 	SCTP_BUF_NEXT(m_notify) = chk->data;
3027 	/* Steal off the mbuf */
3028 	chk->data = NULL;
3029 	/*
3030 	 * For this case, we check the actual socket buffer: since the assoc
3031 	 * is going away, we don't want to overfill the socket buffer for a
3032 	 * non-reader.
3033 	 */
3034 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3035 		sctp_m_freem(m_notify);
3036 		return;
3037 	}
3038 	/* append to socket */
3039 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3040 	    0, 0, stcb->asoc.context, 0, 0, 0,
3041 	    m_notify);
3042 	if (control == NULL) {
3043 		/* no memory */
3044 		sctp_m_freem(m_notify);
3045 		return;
3046 	}
3047 	control->spec_flags = M_NOTIFICATION;
3048 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3049 	    control,
3050 	    &stcb->sctp_socket->so_rcv, 1,
3051 	    SCTP_READ_LOCK_NOT_HELD,
3052 	    so_locked);
3053 }
3054 
3055 
3056 static void
3057 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3058     struct sctp_stream_queue_pending *sp, int so_locked
3059 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3060     SCTP_UNUSED
3061 #endif
3062 )
3063 {
3064 	struct mbuf *m_notify;
3065 	struct sctp_send_failed *ssf;
3066 	struct sctp_send_failed_event *ssfe;
3067 	struct sctp_queued_to_read *control;
3068 	int notifhdr_len;
3069 
3070 	if ((stcb == NULL) ||
3071 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3072 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3073 		/* event not enabled */
3074 		return;
3075 	}
3076 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3077 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3078 	} else {
3079 		notifhdr_len = sizeof(struct sctp_send_failed);
3080 	}
3081 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3082 	if (m_notify == NULL) {
3083 		/* no space left */
3084 		return;
3085 	}
3086 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3087 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3088 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3089 		memset(ssfe, 0, notifhdr_len);
3090 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3091 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3092 		ssfe->ssfe_length = (uint32_t) (notifhdr_len + sp->length);
3093 		ssfe->ssfe_error = error;
3094 		/* not exactly what the user sent in, but should be close :) */
3095 		ssfe->ssfe_info.snd_sid = sp->stream;
3096 		if (sp->some_taken) {
3097 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3098 		} else {
3099 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3100 		}
3101 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3102 		ssfe->ssfe_info.snd_context = sp->context;
3103 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3104 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3105 	} else {
3106 		ssf = mtod(m_notify, struct sctp_send_failed *);
3107 		memset(ssf, 0, notifhdr_len);
3108 		ssf->ssf_type = SCTP_SEND_FAILED;
3109 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3110 		ssf->ssf_length = (uint32_t) (notifhdr_len + sp->length);
3111 		ssf->ssf_error = error;
3112 		/* not exactly what the user sent in, but should be close :) */
3113 		ssf->ssf_info.sinfo_stream = sp->stream;
3114 		ssf->ssf_info.sinfo_ssn = 0;
3115 		if (sp->some_taken) {
3116 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3117 		} else {
3118 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3119 		}
3120 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3121 		ssf->ssf_info.sinfo_context = sp->context;
3122 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3123 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3124 	}
3125 	SCTP_BUF_NEXT(m_notify) = sp->data;
3126 
3127 	/* Steal off the mbuf */
3128 	sp->data = NULL;
3129 	/*
3130 	 * For this case, we check the actual socket buffer: since the assoc
3131 	 * is going away, we don't want to overfill the socket buffer for a
3132 	 * non-reader.
3133 	 */
3134 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3135 		sctp_m_freem(m_notify);
3136 		return;
3137 	}
3138 	/* append to socket */
3139 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3140 	    0, 0, stcb->asoc.context, 0, 0, 0,
3141 	    m_notify);
3142 	if (control == NULL) {
3143 		/* no memory */
3144 		sctp_m_freem(m_notify);
3145 		return;
3146 	}
3147 	control->spec_flags = M_NOTIFICATION;
3148 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3149 	    control,
3150 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3151 }
3152 
3153 
3154 
3155 static void
3156 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3157 {
3158 	struct mbuf *m_notify;
3159 	struct sctp_adaptation_event *sai;
3160 	struct sctp_queued_to_read *control;
3161 
3162 	if ((stcb == NULL) ||
3163 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3164 		/* event not enabled */
3165 		return;
3166 	}
3167 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_NOWAIT, 1, MT_DATA);
3168 	if (m_notify == NULL)
3169 		/* no space left */
3170 		return;
3171 	SCTP_BUF_LEN(m_notify) = 0;
3172 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3173 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3174 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3175 	sai->sai_flags = 0;
3176 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3177 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3178 	sai->sai_assoc_id = sctp_get_associd(stcb);
3179 
3180 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3181 	SCTP_BUF_NEXT(m_notify) = NULL;
3182 
3183 	/* append to socket */
3184 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3185 	    0, 0, stcb->asoc.context, 0, 0, 0,
3186 	    m_notify);
3187 	if (control == NULL) {
3188 		/* no memory */
3189 		sctp_m_freem(m_notify);
3190 		return;
3191 	}
3192 	control->length = SCTP_BUF_LEN(m_notify);
3193 	control->spec_flags = M_NOTIFICATION;
3194 	/* not that we need this */
3195 	control->tail_mbuf = m_notify;
3196 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3197 	    control,
3198 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3199 }
3200 
3201 /* This always must be called with the read-queue LOCKED in the INP */
3202 /* This must always be called with the INP's read-queue LOCKED */
3203 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3204     uint32_t val, int so_locked
3205 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3206     SCTP_UNUSED
3207 #endif
3208 )
3209 {
3210 	struct mbuf *m_notify;
3211 	struct sctp_pdapi_event *pdapi;
3212 	struct sctp_queued_to_read *control;
3213 	struct sockbuf *sb;
3214 
3215 	if ((stcb == NULL) ||
3216 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3217 		/* event not enabled */
3218 		return;
3219 	}
3220 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3221 		return;
3222 	}
3223 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3224 	if (m_notify == NULL)
3225 		/* no space left */
3226 		return;
3227 	SCTP_BUF_LEN(m_notify) = 0;
3228 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3229 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3230 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3231 	pdapi->pdapi_flags = 0;
3232 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3233 	pdapi->pdapi_indication = error;
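	/*
	 * 'val' packs the stream number in its upper 16 bits and the stream
	 * sequence number in its lower 16 bits.
	 */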
3234 	pdapi->pdapi_stream = (val >> 16);
3235 	pdapi->pdapi_seq = (val & 0x0000ffff);
3236 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3237 
3238 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3239 	SCTP_BUF_NEXT(m_notify) = NULL;
3240 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3241 	    0, 0, stcb->asoc.context, 0, 0, 0,
3242 	    m_notify);
3243 	if (control == NULL) {
3244 		/* no memory */
3245 		sctp_m_freem(m_notify);
3246 		return;
3247 	}
3248 	control->spec_flags = M_NOTIFICATION;
3249 	control->length = SCTP_BUF_LEN(m_notify);
3250 	/* not that we need this */
3251 	control->tail_mbuf = m_notify;
3252 	control->held_length = 0;
3253 	control->length = 0;
3254 	sb = &stcb->sctp_socket->so_rcv;
3255 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3256 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3257 	}
3258 	sctp_sballoc(stcb, sb, m_notify);
3259 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3260 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3261 	}
3262 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3263 	control->end_added = 1;
3264 	if (stcb->asoc.control_pdapi)
3265 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3266 	else {
3267 		/* we really should not see this case */
3268 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3269 	}
3270 	if (stcb->sctp_ep && stcb->sctp_socket) {
3271 		/* This should always be the case */
3272 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3273 		struct socket *so;
3274 
3275 		so = SCTP_INP_SO(stcb->sctp_ep);
3276 		if (!so_locked) {
3277 			atomic_add_int(&stcb->asoc.refcnt, 1);
3278 			SCTP_TCB_UNLOCK(stcb);
3279 			SCTP_SOCKET_LOCK(so, 1);
3280 			SCTP_TCB_LOCK(stcb);
3281 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3282 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3283 				SCTP_SOCKET_UNLOCK(so, 1);
3284 				return;
3285 			}
3286 		}
3287 #endif
3288 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3289 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3290 		if (!so_locked) {
3291 			SCTP_SOCKET_UNLOCK(so, 1);
3292 		}
3293 #endif
3294 	}
3295 }
3296 
3297 static void
3298 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3299 {
3300 	struct mbuf *m_notify;
3301 	struct sctp_shutdown_event *sse;
3302 	struct sctp_queued_to_read *control;
3303 
3304 	/*
3305 	 * For TCP model AND UDP connected sockets we will send an error up
3306 	 * when a SHUTDOWN completes.
3307 	 */
3308 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3309 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3310 		/* mark socket closed for read/write and wakeup! */
3311 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3312 		struct socket *so;
3313 
3314 		so = SCTP_INP_SO(stcb->sctp_ep);
3315 		atomic_add_int(&stcb->asoc.refcnt, 1);
3316 		SCTP_TCB_UNLOCK(stcb);
3317 		SCTP_SOCKET_LOCK(so, 1);
3318 		SCTP_TCB_LOCK(stcb);
3319 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3320 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3321 			SCTP_SOCKET_UNLOCK(so, 1);
3322 			return;
3323 		}
3324 #endif
3325 		socantsendmore(stcb->sctp_socket);
3326 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3327 		SCTP_SOCKET_UNLOCK(so, 1);
3328 #endif
3329 	}
3330 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3331 		/* event not enabled */
3332 		return;
3333 	}
3334 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3335 	if (m_notify == NULL)
3336 		/* no space left */
3337 		return;
3338 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3339 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3340 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3341 	sse->sse_flags = 0;
3342 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3343 	sse->sse_assoc_id = sctp_get_associd(stcb);
3344 
3345 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3346 	SCTP_BUF_NEXT(m_notify) = NULL;
3347 
3348 	/* append to socket */
3349 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3350 	    0, 0, stcb->asoc.context, 0, 0, 0,
3351 	    m_notify);
3352 	if (control == NULL) {
3353 		/* no memory */
3354 		sctp_m_freem(m_notify);
3355 		return;
3356 	}
3357 	control->spec_flags = M_NOTIFICATION;
3358 	control->length = SCTP_BUF_LEN(m_notify);
3359 	/* not that we need this */
3360 	control->tail_mbuf = m_notify;
3361 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3362 	    control,
3363 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3364 }
3365 
3366 static void
3367 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3368     int so_locked
3369 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3370     SCTP_UNUSED
3371 #endif
3372 )
3373 {
3374 	struct mbuf *m_notify;
3375 	struct sctp_sender_dry_event *event;
3376 	struct sctp_queued_to_read *control;
3377 
3378 	if ((stcb == NULL) ||
3379 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3380 		/* event not enabled */
3381 		return;
3382 	}
3383 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3384 	if (m_notify == NULL) {
3385 		/* no space left */
3386 		return;
3387 	}
3388 	SCTP_BUF_LEN(m_notify) = 0;
3389 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3390 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3391 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3392 	event->sender_dry_flags = 0;
3393 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3394 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3395 
3396 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3397 	SCTP_BUF_NEXT(m_notify) = NULL;
3398 
3399 	/* append to socket */
3400 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3401 	    0, 0, stcb->asoc.context, 0, 0, 0,
3402 	    m_notify);
3403 	if (control == NULL) {
3404 		/* no memory */
3405 		sctp_m_freem(m_notify);
3406 		return;
3407 	}
3408 	control->length = SCTP_BUF_LEN(m_notify);
3409 	control->spec_flags = M_NOTIFICATION;
3410 	/* not that we need this */
3411 	control->tail_mbuf = m_notify;
3412 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3413 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3414 }
3415 
3416 
3417 void
3418 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3419 {
3420 	struct mbuf *m_notify;
3421 	struct sctp_queued_to_read *control;
3422 	struct sctp_stream_change_event *stradd;
3423 
3424 	if ((stcb == NULL) ||
3425 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3426 		/* event not enabled */
3427 		return;
3428 	}
3429 	if ((stcb->asoc.peer_req_out) && flag) {
3430 		/* Peer made the request, don't tell the local user */
3431 		stcb->asoc.peer_req_out = 0;
3432 		return;
3433 	}
3434 	stcb->asoc.peer_req_out = 0;
3435 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3436 	if (m_notify == NULL)
3437 		/* no space left */
3438 		return;
3439 	SCTP_BUF_LEN(m_notify) = 0;
3440 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3441 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3442 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3443 	stradd->strchange_flags = flag;
3444 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3445 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3446 	stradd->strchange_instrms = numberin;
3447 	stradd->strchange_outstrms = numberout;
3448 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3449 	SCTP_BUF_NEXT(m_notify) = NULL;
3450 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3451 		/* no space */
3452 		sctp_m_freem(m_notify);
3453 		return;
3454 	}
3455 	/* append to socket */
3456 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3457 	    0, 0, stcb->asoc.context, 0, 0, 0,
3458 	    m_notify);
3459 	if (control == NULL) {
3460 		/* no memory */
3461 		sctp_m_freem(m_notify);
3462 		return;
3463 	}
3464 	control->spec_flags = M_NOTIFICATION;
3465 	control->length = SCTP_BUF_LEN(m_notify);
3466 	/* not that we need this */
3467 	control->tail_mbuf = m_notify;
3468 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3469 	    control,
3470 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3471 }
3472 
3473 void
3474 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3475 {
3476 	struct mbuf *m_notify;
3477 	struct sctp_queued_to_read *control;
3478 	struct sctp_assoc_reset_event *strasoc;
3479 
3480 	if ((stcb == NULL) ||
3481 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3482 		/* event not enabled */
3483 		return;
3484 	}
3485 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3486 	if (m_notify == NULL)
3487 		/* no space left */
3488 		return;
3489 	SCTP_BUF_LEN(m_notify) = 0;
3490 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3491 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3492 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3493 	strasoc->assocreset_flags = flag;
3494 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3495 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3496 	strasoc->assocreset_local_tsn = sending_tsn;
3497 	strasoc->assocreset_remote_tsn = recv_tsn;
3498 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3499 	SCTP_BUF_NEXT(m_notify) = NULL;
3500 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3501 		/* no space */
3502 		sctp_m_freem(m_notify);
3503 		return;
3504 	}
3505 	/* append to socket */
3506 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3507 	    0, 0, stcb->asoc.context, 0, 0, 0,
3508 	    m_notify);
3509 	if (control == NULL) {
3510 		/* no memory */
3511 		sctp_m_freem(m_notify);
3512 		return;
3513 	}
3514 	control->spec_flags = M_NOTIFICATION;
3515 	control->length = SCTP_BUF_LEN(m_notify);
3516 	/* not that we need this */
3517 	control->tail_mbuf = m_notify;
3518 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3519 	    control,
3520 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3521 }
3522 
3523 
3524 
3525 static void
3526 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3527     int number_entries, uint16_t * list, int flag)
3528 {
3529 	struct mbuf *m_notify;
3530 	struct sctp_queued_to_read *control;
3531 	struct sctp_stream_reset_event *strreset;
3532 	int len;
3533 
3534 	if ((stcb == NULL) ||
3535 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3536 		/* event not enabled */
3537 		return;
3538 	}
3539 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3540 	if (m_notify == NULL)
3541 		/* no space left */
3542 		return;
3543 	SCTP_BUF_LEN(m_notify) = 0;
3544 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3545 	if (len > M_TRAILINGSPACE(m_notify)) {
3546 		/* never enough room */
3547 		sctp_m_freem(m_notify);
3548 		return;
3549 	}
3550 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3551 	memset(strreset, 0, len);
3552 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3553 	strreset->strreset_flags = flag;
3554 	strreset->strreset_length = len;
3555 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3556 	if (number_entries) {
3557 		int i;
3558 
3559 		for (i = 0; i < number_entries; i++) {
3560 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3561 		}
3562 	}
3563 	SCTP_BUF_LEN(m_notify) = len;
3564 	SCTP_BUF_NEXT(m_notify) = NULL;
3565 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3566 		/* no space */
3567 		sctp_m_freem(m_notify);
3568 		return;
3569 	}
3570 	/* append to socket */
3571 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3572 	    0, 0, stcb->asoc.context, 0, 0, 0,
3573 	    m_notify);
3574 	if (control == NULL) {
3575 		/* no memory */
3576 		sctp_m_freem(m_notify);
3577 		return;
3578 	}
3579 	control->spec_flags = M_NOTIFICATION;
3580 	control->length = SCTP_BUF_LEN(m_notify);
3581 	/* not that we need this */
3582 	control->tail_mbuf = m_notify;
3583 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3584 	    control,
3585 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3586 }
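/*
 * The stream reset notification built above is variable length: the
 * affected stream identifiers follow the fixed header in
 * strreset_stream_list[] and strreset_length covers both parts.  A hedged
 * userland sketch of how a receiver might walk that list ("buf" holds a
 * notification read with recvmsg(); the names are illustrative only):
 *
 *	struct sctp_stream_reset_event *e;
 *	unsigned int i, n;
 *
 *	e = (struct sctp_stream_reset_event *)buf;
 *	n = (e->strreset_length - sizeof(*e)) / sizeof(uint16_t);
 *	for (i = 0; i < n; i++)
 *		printf("stream %u was reset\n", e->strreset_stream_list[i]);
 */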
3587 
3588 
3589 static void
3590 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3591 {
3592 	struct mbuf *m_notify;
3593 	struct sctp_remote_error *sre;
3594 	struct sctp_queued_to_read *control;
3595 	unsigned int notif_len;
3596 	uint16_t chunk_len;
3597 
3598 	if ((stcb == NULL) ||
3599 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3600 		return;
3601 	}
3602 	if (chunk != NULL) {
3603 		chunk_len = ntohs(chunk->ch.chunk_length);
3604 	} else {
3605 		chunk_len = 0;
3606 	}
3607 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3608 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3609 	if (m_notify == NULL) {
3610 		/* Retry with smaller value. */
3611 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3612 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3613 		if (m_notify == NULL) {
3614 			return;
3615 		}
3616 	}
3617 	SCTP_BUF_NEXT(m_notify) = NULL;
3618 	sre = mtod(m_notify, struct sctp_remote_error *);
3619 	memset(sre, 0, notif_len);
3620 	sre->sre_type = SCTP_REMOTE_ERROR;
3621 	sre->sre_flags = 0;
3622 	sre->sre_length = sizeof(struct sctp_remote_error);
3623 	sre->sre_error = error;
3624 	sre->sre_assoc_id = sctp_get_associd(stcb);
3625 	if (notif_len > sizeof(struct sctp_remote_error)) {
3626 		memcpy(sre->sre_data, chunk, chunk_len);
3627 		sre->sre_length += chunk_len;
3628 	}
3629 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3630 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3631 	    0, 0, stcb->asoc.context, 0, 0, 0,
3632 	    m_notify);
3633 	if (control != NULL) {
3634 		control->length = SCTP_BUF_LEN(m_notify);
3635 		/* not that we need this */
3636 		control->tail_mbuf = m_notify;
3637 		control->spec_flags = M_NOTIFICATION;
3638 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3639 		    control,
3640 		    &stcb->sctp_socket->so_rcv, 1,
3641 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3642 	} else {
3643 		sctp_m_freem(m_notify);
3644 	}
3645 }
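/*
 * For SCTP_REMOTE_ERROR the bytes following the fixed sctp_remote_error
 * header (sre_data) are the raw ERROR chunk as received from the peer,
 * and sre_length covers the header plus that chunk; a consumer can
 * therefore recover the chunk length as
 * sre_length - sizeof(struct sctp_remote_error).
 */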
3646 
3647 
3648 void
3649 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3650     uint32_t error, void *data, int so_locked
3651 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3652     SCTP_UNUSED
3653 #endif
3654 )
3655 {
3656 	if ((stcb == NULL) ||
3657 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3658 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3659 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3660 		/* If the socket is gone we are out of here */
3661 		return;
3662 	}
3663 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3664 		return;
3665 	}
3666 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3667 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3668 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3669 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3670 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3671 			/* Don't report these in front states */
3672 			return;
3673 		}
3674 	}
3675 	switch (notification) {
3676 	case SCTP_NOTIFY_ASSOC_UP:
3677 		if (stcb->asoc.assoc_up_sent == 0) {
3678 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3679 			stcb->asoc.assoc_up_sent = 1;
3680 		}
3681 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3682 			sctp_notify_adaptation_layer(stcb);
3683 		}
3684 		if (stcb->asoc.auth_supported == 0) {
3685 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3686 			    NULL, so_locked);
3687 		}
3688 		break;
3689 	case SCTP_NOTIFY_ASSOC_DOWN:
3690 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3691 		break;
3692 	case SCTP_NOTIFY_INTERFACE_DOWN:
3693 		{
3694 			struct sctp_nets *net;
3695 
3696 			net = (struct sctp_nets *)data;
3697 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3698 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3699 			break;
3700 		}
3701 	case SCTP_NOTIFY_INTERFACE_UP:
3702 		{
3703 			struct sctp_nets *net;
3704 
3705 			net = (struct sctp_nets *)data;
3706 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3707 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3708 			break;
3709 		}
3710 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3711 		{
3712 			struct sctp_nets *net;
3713 
3714 			net = (struct sctp_nets *)data;
3715 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3716 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3717 			break;
3718 		}
3719 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3720 		sctp_notify_send_failed2(stcb, error,
3721 		    (struct sctp_stream_queue_pending *)data, so_locked);
3722 		break;
3723 	case SCTP_NOTIFY_SENT_DG_FAIL:
3724 		sctp_notify_send_failed(stcb, 1, error,
3725 		    (struct sctp_tmit_chunk *)data, so_locked);
3726 		break;
3727 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3728 		sctp_notify_send_failed(stcb, 0, error,
3729 		    (struct sctp_tmit_chunk *)data, so_locked);
3730 		break;
3731 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3732 		{
3733 			uint32_t val;
3734 
3735 			val = *((uint32_t *) data);
3736 
3737 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3738 			break;
3739 		}
3740 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3741 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3742 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3743 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3744 		} else {
3745 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3746 		}
3747 		break;
3748 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3749 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3750 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3751 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3752 		} else {
3753 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3754 		}
3755 		break;
3756 	case SCTP_NOTIFY_ASSOC_RESTART:
3757 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3758 		if (stcb->asoc.auth_supported == 0) {
3759 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3760 			    NULL, so_locked);
3761 		}
3762 		break;
3763 	case SCTP_NOTIFY_STR_RESET_SEND:
3764 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3765 		break;
3766 	case SCTP_NOTIFY_STR_RESET_RECV:
3767 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3768 		break;
3769 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3770 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3771 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3772 		break;
3773 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3774 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3775 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3776 		break;
3777 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3778 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3779 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3780 		break;
3781 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3782 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3783 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3784 		break;
3785 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3786 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3787 		    error, so_locked);
3788 		break;
3789 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3790 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3791 		    error, so_locked);
3792 		break;
3793 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3794 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3795 		    error, so_locked);
3796 		break;
3797 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3798 		sctp_notify_shutdown_event(stcb);
3799 		break;
3800 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3801 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3802 		    (uint16_t) (uintptr_t) data,
3803 		    so_locked);
3804 		break;
3805 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3806 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3807 		    (uint16_t) (uintptr_t) data,
3808 		    so_locked);
3809 		break;
3810 	case SCTP_NOTIFY_NO_PEER_AUTH:
3811 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3812 		    (uint16_t) (uintptr_t) data,
3813 		    so_locked);
3814 		break;
3815 	case SCTP_NOTIFY_SENDER_DRY:
3816 		sctp_notify_sender_dry_event(stcb, so_locked);
3817 		break;
3818 	case SCTP_NOTIFY_REMOTE_ERROR:
3819 		sctp_notify_remote_error(stcb, error, data);
3820 		break;
3821 	default:
3822 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3823 		    __func__, notification, notification);
3824 		break;
3825 	}			/* end switch */
3826 }
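/*
 * sctp_ulp_notify() is the common dispatch point the rest of the stack
 * uses to surface events to the user.  The "data" argument is
 * notification specific: a struct sctp_nets * for the interface events,
 * a struct sctp_tmit_chunk * for sent/unsent datagram failures, a
 * struct sctp_stream_queue_pending * for SCTP_NOTIFY_SPECIAL_SP_FAIL, a
 * uint16_t stream list for the stream reset cases, and so on.  A typical
 * in-kernel call, as seen later in this file:
 *
 *	sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
 *	    error, chk, so_locked);
 */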
3827 
3828 void
3829 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3830 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3831     SCTP_UNUSED
3832 #endif
3833 )
3834 {
3835 	struct sctp_association *asoc;
3836 	struct sctp_stream_out *outs;
3837 	struct sctp_tmit_chunk *chk, *nchk;
3838 	struct sctp_stream_queue_pending *sp, *nsp;
3839 	int i;
3840 
3841 	if (stcb == NULL) {
3842 		return;
3843 	}
3844 	asoc = &stcb->asoc;
3845 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3846 		/* already being freed */
3847 		return;
3848 	}
3849 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3850 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3851 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3852 		return;
3853 	}
3854 	/* now go through all the gunk, freeing chunks */
3855 	if (holds_lock == 0) {
3856 		SCTP_TCB_SEND_LOCK(stcb);
3857 	}
3858 	/* sent queue SHOULD be empty */
3859 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3860 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3861 		asoc->sent_queue_cnt--;
3862 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3863 			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3864 				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3865 #ifdef INVARIANTS
3866 			} else {
3867 				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3868 #endif
3869 			}
3870 		}
3871 		if (chk->data != NULL) {
3872 			sctp_free_bufspace(stcb, asoc, chk, 1);
3873 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3874 			    error, chk, so_locked);
3875 			if (chk->data) {
3876 				sctp_m_freem(chk->data);
3877 				chk->data = NULL;
3878 			}
3879 		}
3880 		sctp_free_a_chunk(stcb, chk, so_locked);
3881 		/* sa_ignore FREED_MEMORY */
3882 	}
3883 	/* pending send queue SHOULD be empty */
3884 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3885 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3886 		asoc->send_queue_cnt--;
3887 		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3888 			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3889 #ifdef INVARIANTS
3890 		} else {
3891 			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3892 #endif
3893 		}
3894 		if (chk->data != NULL) {
3895 			sctp_free_bufspace(stcb, asoc, chk, 1);
3896 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3897 			    error, chk, so_locked);
3898 			if (chk->data) {
3899 				sctp_m_freem(chk->data);
3900 				chk->data = NULL;
3901 			}
3902 		}
3903 		sctp_free_a_chunk(stcb, chk, so_locked);
3904 		/* sa_ignore FREED_MEMORY */
3905 	}
3906 	for (i = 0; i < asoc->streamoutcnt; i++) {
3907 		/* For each stream */
3908 		outs = &asoc->strmout[i];
3909 		/* clean up any sends there */
3910 		asoc->locked_on_sending = NULL;
3911 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3912 			asoc->stream_queue_cnt--;
3913 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3914 			sctp_free_spbufspace(stcb, asoc, sp);
3915 			if (sp->data) {
3916 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3917 				    error, (void *)sp, so_locked);
3918 				if (sp->data) {
3919 					sctp_m_freem(sp->data);
3920 					sp->data = NULL;
3921 					sp->tail_mbuf = NULL;
3922 					sp->length = 0;
3923 				}
3924 			}
3925 			if (sp->net) {
3926 				sctp_free_remote_addr(sp->net);
3927 				sp->net = NULL;
3928 			}
3929 			/* Free the chunk */
3930 			sctp_free_a_strmoq(stcb, sp, so_locked);
3931 			/* sa_ignore FREED_MEMORY */
3932 		}
3933 	}
3934 
3935 	if (holds_lock == 0) {
3936 		SCTP_TCB_SEND_UNLOCK(stcb);
3937 	}
3938 }
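/*
 * The function above abandons outbound data in three passes while the
 * send lock is held: (1) everything on the sent queue is reported via
 * SCTP_NOTIFY_SENT_DG_FAIL, (2) everything on the not-yet-transmitted
 * send queue via SCTP_NOTIFY_UNSENT_DG_FAIL, and (3) the per-stream
 * output queues via SCTP_NOTIFY_SPECIAL_SP_FAIL, freeing the mbufs and
 * adjusting the queue accounting as it goes.
 */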
3939 
3940 void
3941 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3942     struct sctp_abort_chunk *abort, int so_locked
3943 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3944     SCTP_UNUSED
3945 #endif
3946 )
3947 {
3948 	if (stcb == NULL) {
3949 		return;
3950 	}
3951 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3952 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3953 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3954 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3955 	}
3956 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3957 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3958 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3959 		return;
3960 	}
3961 	/* Tell them we lost the asoc */
3962 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3963 	if (from_peer) {
3964 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3965 	} else {
3966 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3967 	}
3968 }
3969 
3970 void
3971 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3972     struct mbuf *m, int iphlen,
3973     struct sockaddr *src, struct sockaddr *dst,
3974     struct sctphdr *sh, struct mbuf *op_err,
3975     uint8_t mflowtype, uint32_t mflowid,
3976     uint32_t vrf_id, uint16_t port)
3977 {
3978 	uint32_t vtag;
3979 
3980 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3981 	struct socket *so;
3982 
3983 #endif
3984 
3985 	vtag = 0;
3986 	if (stcb != NULL) {
3987 		/* We have a TCB to abort, send notification too */
3988 		vtag = stcb->asoc.peer_vtag;
3989 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
3990 		/* get the assoc vrf id and table id */
3991 		vrf_id = stcb->asoc.vrf_id;
3992 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3993 	}
3994 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
3995 	    mflowtype, mflowid, inp->fibnum,
3996 	    vrf_id, port);
3997 	if (stcb != NULL) {
3998 		/* Ok, now lets free it */
3999 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4000 		so = SCTP_INP_SO(inp);
4001 		atomic_add_int(&stcb->asoc.refcnt, 1);
4002 		SCTP_TCB_UNLOCK(stcb);
4003 		SCTP_SOCKET_LOCK(so, 1);
4004 		SCTP_TCB_LOCK(stcb);
4005 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4006 #endif
4007 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4008 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4009 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4010 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4011 		}
4012 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4013 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
4014 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4015 		SCTP_SOCKET_UNLOCK(so, 1);
4016 #endif
4017 	}
4018 }
4019 
4020 #ifdef SCTP_ASOCLOG_OF_TSNS
4021 void
4022 sctp_print_out_track_log(struct sctp_tcb *stcb)
4023 {
4024 #ifdef NOISY_PRINTS
4025 	int i;
4026 
4027 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4028 	SCTP_PRINTF("IN bound TSN log-aaa\n");
4029 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4030 		SCTP_PRINTF("None rcvd\n");
4031 		goto none_in;
4032 	}
4033 	if (stcb->asoc.tsn_in_wrapped) {
4034 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4035 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4036 			    stcb->asoc.in_tsnlog[i].tsn,
4037 			    stcb->asoc.in_tsnlog[i].strm,
4038 			    stcb->asoc.in_tsnlog[i].seq,
4039 			    stcb->asoc.in_tsnlog[i].flgs,
4040 			    stcb->asoc.in_tsnlog[i].sz);
4041 		}
4042 	}
4043 	if (stcb->asoc.tsn_in_at) {
4044 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4045 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4046 			    stcb->asoc.in_tsnlog[i].tsn,
4047 			    stcb->asoc.in_tsnlog[i].strm,
4048 			    stcb->asoc.in_tsnlog[i].seq,
4049 			    stcb->asoc.in_tsnlog[i].flgs,
4050 			    stcb->asoc.in_tsnlog[i].sz);
4051 		}
4052 	}
4053 none_in:
4054 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4055 	if ((stcb->asoc.tsn_out_at == 0) &&
4056 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4057 		SCTP_PRINTF("None sent\n");
4058 	}
4059 	if (stcb->asoc.tsn_out_wrapped) {
4060 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4061 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4062 			    stcb->asoc.out_tsnlog[i].tsn,
4063 			    stcb->asoc.out_tsnlog[i].strm,
4064 			    stcb->asoc.out_tsnlog[i].seq,
4065 			    stcb->asoc.out_tsnlog[i].flgs,
4066 			    stcb->asoc.out_tsnlog[i].sz);
4067 		}
4068 	}
4069 	if (stcb->asoc.tsn_out_at) {
4070 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4071 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4072 			    stcb->asoc.out_tsnlog[i].tsn,
4073 			    stcb->asoc.out_tsnlog[i].strm,
4074 			    stcb->asoc.out_tsnlog[i].seq,
4075 			    stcb->asoc.out_tsnlog[i].flgs,
4076 			    stcb->asoc.out_tsnlog[i].sz);
4077 		}
4078 	}
4079 #endif
4080 }
4081 
4082 #endif
4083 
4084 void
4085 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4086     struct mbuf *op_err,
4087     int so_locked
4088 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4089     SCTP_UNUSED
4090 #endif
4091 )
4092 {
4093 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4094 	struct socket *so;
4095 
4096 #endif
4097 
4098 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4099 	so = SCTP_INP_SO(inp);
4100 #endif
4101 	if (stcb == NULL) {
4102 		/* Got to have a TCB */
4103 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4104 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4105 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4106 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4107 			}
4108 		}
4109 		return;
4110 	} else {
4111 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4112 	}
4113 	/* notify the ulp */
4114 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4115 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4116 	}
4117 	/* notify the peer */
4118 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4119 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4120 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4121 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4122 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4123 	}
4124 	/* now free the asoc */
4125 #ifdef SCTP_ASOCLOG_OF_TSNS
4126 	sctp_print_out_track_log(stcb);
4127 #endif
4128 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4129 	if (!so_locked) {
4130 		atomic_add_int(&stcb->asoc.refcnt, 1);
4131 		SCTP_TCB_UNLOCK(stcb);
4132 		SCTP_SOCKET_LOCK(so, 1);
4133 		SCTP_TCB_LOCK(stcb);
4134 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4135 	}
4136 #endif
4137 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4138 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4139 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4140 	if (!so_locked) {
4141 		SCTP_SOCKET_UNLOCK(so, 1);
4142 	}
4143 #endif
4144 }
4145 
4146 void
4147 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4148     struct sockaddr *src, struct sockaddr *dst,
4149     struct sctphdr *sh, struct sctp_inpcb *inp,
4150     struct mbuf *cause,
4151     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4152     uint32_t vrf_id, uint16_t port)
4153 {
4154 	struct sctp_chunkhdr *ch, chunk_buf;
4155 	unsigned int chk_length;
4156 	int contains_init_chunk;
4157 
4158 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4159 	/* Generate a TO address for future reference */
4160 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4161 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4162 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4163 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4164 		}
4165 	}
4166 	contains_init_chunk = 0;
4167 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4168 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4169 	while (ch != NULL) {
4170 		chk_length = ntohs(ch->chunk_length);
4171 		if (chk_length < sizeof(*ch)) {
4172 			/* break to abort land */
4173 			break;
4174 		}
4175 		switch (ch->chunk_type) {
4176 		case SCTP_INIT:
4177 			contains_init_chunk = 1;
4178 			break;
4179 		case SCTP_PACKET_DROPPED:
4180 			/* we don't respond to pkt-dropped */
4181 			return;
4182 		case SCTP_ABORT_ASSOCIATION:
4183 			/* we don't respond with an ABORT to an ABORT */
4184 			return;
4185 		case SCTP_SHUTDOWN_COMPLETE:
4186 			/*
4187 			 * we ignore it since we are not waiting for it and
4188 			 * the peer is gone
4189 			 */
4190 			return;
4191 		case SCTP_SHUTDOWN_ACK:
4192 			sctp_send_shutdown_complete2(src, dst, sh,
4193 			    mflowtype, mflowid, fibnum,
4194 			    vrf_id, port);
4195 			return;
4196 		default:
4197 			break;
4198 		}
4199 		offset += SCTP_SIZE32(chk_length);
4200 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4201 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4202 	}
4203 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4204 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4205 	    (contains_init_chunk == 0))) {
4206 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4207 		    mflowtype, mflowid, fibnum,
4208 		    vrf_id, port);
4209 	}
4210 }
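/*
 * The sctp_blackhole sysctl controls whether an ABORT is sent for an
 * out-of-the-blue packet.  As the test above reads: 0 means always
 * respond, 1 means respond unless the packet contained an INIT chunk,
 * and any other value suppresses the response entirely.  Packets
 * carrying PACKET-DROPPED, ABORT or SHUTDOWN-COMPLETE chunks are never
 * answered, and a SHUTDOWN-ACK is answered with a SHUTDOWN-COMPLETE
 * instead.
 */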
4211 
4212 /*
4213  * check the inbound datagram to make sure there is not an ABORT chunk
4214  * inside it; if there is, return 1, else return 0.
4215  */
4216 int
4217 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4218 {
4219 	struct sctp_chunkhdr *ch;
4220 	struct sctp_init_chunk *init_chk, chunk_buf;
4221 	int offset;
4222 	unsigned int chk_length;
4223 
4224 	offset = iphlen + sizeof(struct sctphdr);
4225 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4226 	    (uint8_t *) & chunk_buf);
4227 	while (ch != NULL) {
4228 		chk_length = ntohs(ch->chunk_length);
4229 		if (chk_length < sizeof(*ch)) {
4230 			/* packet is probably corrupt */
4231 			break;
4232 		}
4233 		/* we seem to be ok, is it an abort? */
4234 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4235 			/* yep, tell them */
4236 			return (1);
4237 		}
4238 		if (ch->chunk_type == SCTP_INITIATION) {
4239 			/* need to update the Vtag */
4240 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4241 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4242 			if (init_chk != NULL) {
4243 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4244 			}
4245 		}
4246 		/* Nope, move to the next chunk */
4247 		offset += SCTP_SIZE32(chk_length);
4248 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4249 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4250 	}
4251 	return (0);
4252 }
4253 
4254 /*
4255  * currently (2/02), ifa_addr embeds the scope_id and doesn't have sin6_scope_id
4256  * set (i.e. it's 0), so this function is used to compare link-local scopes
4257  */
4258 #ifdef INET6
4259 uint32_t
4260 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4261 {
4262 	struct sockaddr_in6 a, b;
4263 
4264 	/* save copies */
4265 	a = *addr1;
4266 	b = *addr2;
4267 
4268 	if (a.sin6_scope_id == 0)
4269 		if (sa6_recoverscope(&a)) {
4270 			/* can't get scope, so can't match */
4271 			return (0);
4272 		}
4273 	if (b.sin6_scope_id == 0)
4274 		if (sa6_recoverscope(&b)) {
4275 			/* can't get scope, so can't match */
4276 			return (0);
4277 		}
4278 	if (a.sin6_scope_id != b.sin6_scope_id)
4279 		return (0);
4280 
4281 	return (1);
4282 }
4283 
4284 /*
4285  * returns a sockaddr_in6 with embedded scope recovered and removed
4286  */
4287 struct sockaddr_in6 *
4288 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4289 {
4290 	/* check and strip embedded scope junk */
4291 	if (addr->sin6_family == AF_INET6) {
4292 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4293 			if (addr->sin6_scope_id == 0) {
4294 				*store = *addr;
4295 				if (!sa6_recoverscope(store)) {
4296 					/* use the recovered scope */
4297 					addr = store;
4298 				}
4299 			} else {
4300 				/* else, return the original "to" addr */
4301 				in6_clearscope(&addr->sin6_addr);
4302 			}
4303 		}
4304 	}
4305 	return (addr);
4306 }
4307 
4308 #endif
4309 
4310 /*
4311  * are the two addresses the same?  currently a "scopeless" check returns: 1
4312  * if same, 0 if not
4313  */
4314 int
4315 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4316 {
4317 
4318 	/* must be valid */
4319 	if (sa1 == NULL || sa2 == NULL)
4320 		return (0);
4321 
4322 	/* must be the same family */
4323 	if (sa1->sa_family != sa2->sa_family)
4324 		return (0);
4325 
4326 	switch (sa1->sa_family) {
4327 #ifdef INET6
4328 	case AF_INET6:
4329 		{
4330 			/* IPv6 addresses */
4331 			struct sockaddr_in6 *sin6_1, *sin6_2;
4332 
4333 			sin6_1 = (struct sockaddr_in6 *)sa1;
4334 			sin6_2 = (struct sockaddr_in6 *)sa2;
4335 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4336 			    sin6_2));
4337 		}
4338 #endif
4339 #ifdef INET
4340 	case AF_INET:
4341 		{
4342 			/* IPv4 addresses */
4343 			struct sockaddr_in *sin_1, *sin_2;
4344 
4345 			sin_1 = (struct sockaddr_in *)sa1;
4346 			sin_2 = (struct sockaddr_in *)sa2;
4347 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4348 		}
4349 #endif
4350 	default:
4351 		/* we don't do these... */
4352 		return (0);
4353 	}
4354 }
4355 
4356 void
4357 sctp_print_address(struct sockaddr *sa)
4358 {
4359 #ifdef INET6
4360 	char ip6buf[INET6_ADDRSTRLEN];
4361 
4362 #endif
4363 
4364 	switch (sa->sa_family) {
4365 #ifdef INET6
4366 	case AF_INET6:
4367 		{
4368 			struct sockaddr_in6 *sin6;
4369 
4370 			sin6 = (struct sockaddr_in6 *)sa;
4371 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4372 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4373 			    ntohs(sin6->sin6_port),
4374 			    sin6->sin6_scope_id);
4375 			break;
4376 		}
4377 #endif
4378 #ifdef INET
4379 	case AF_INET:
4380 		{
4381 			struct sockaddr_in *sin;
4382 			unsigned char *p;
4383 
4384 			sin = (struct sockaddr_in *)sa;
4385 			p = (unsigned char *)&sin->sin_addr;
4386 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4387 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4388 			break;
4389 		}
4390 #endif
4391 	default:
4392 		SCTP_PRINTF("?\n");
4393 		break;
4394 	}
4395 }
4396 
4397 void
4398 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4399     struct sctp_inpcb *new_inp,
4400     struct sctp_tcb *stcb,
4401     int waitflags)
4402 {
4403 	/*
4404 	 * go through our old INP and pull off any control structures that
4405 	 * belong to stcb and move them to the new inp.
4406 	 */
4407 	struct socket *old_so, *new_so;
4408 	struct sctp_queued_to_read *control, *nctl;
4409 	struct sctp_readhead tmp_queue;
4410 	struct mbuf *m;
4411 	int error = 0;
4412 
4413 	old_so = old_inp->sctp_socket;
4414 	new_so = new_inp->sctp_socket;
4415 	TAILQ_INIT(&tmp_queue);
4416 	error = sblock(&old_so->so_rcv, waitflags);
4417 	if (error) {
4418 		/*
4419 		 * Gak, can't get the sblock, we have a problem. The data will
4420 		 * be left stranded and we don't dare look at it, since the
4421 		 * other thread may be reading something. Oh well, it's a
4422 		 * screwed up app that does a peeloff or an accept while
4423 		 * reading from the main socket... actually it's only the
4424 		 * peeloff() case, since I think read will fail on a
4425 		 * listening socket.
4426 		 */
4427 		return;
4428 	}
4429 	/* lock the socket buffers */
4430 	SCTP_INP_READ_LOCK(old_inp);
4431 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4432 		/* Pull off all for our target stcb */
4433 		if (control->stcb == stcb) {
4434 			/* remove it we want it */
4435 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4436 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4437 			m = control->data;
4438 			while (m) {
4439 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4440 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4441 				}
4442 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4443 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4444 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4445 				}
4446 				m = SCTP_BUF_NEXT(m);
4447 			}
4448 		}
4449 	}
4450 	SCTP_INP_READ_UNLOCK(old_inp);
4451 	/* Remove the sb-lock on the old socket */
4452 
4453 	sbunlock(&old_so->so_rcv);
4454 	/* Now we move them over to the new socket buffer */
4455 	SCTP_INP_READ_LOCK(new_inp);
4456 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4457 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4458 		m = control->data;
4459 		while (m) {
4460 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4461 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4462 			}
4463 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4464 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4465 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4466 			}
4467 			m = SCTP_BUF_NEXT(m);
4468 		}
4469 	}
4470 	SCTP_INP_READ_UNLOCK(new_inp);
4471 }
4472 
4473 void
4474 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4475     struct sctp_tcb *stcb,
4476     int so_locked
4477 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4478     SCTP_UNUSED
4479 #endif
4480 )
4481 {
4482 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4483 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4484 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4485 		} else {
4486 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4487 			struct socket *so;
4488 
4489 			so = SCTP_INP_SO(inp);
4490 			if (!so_locked) {
4491 				if (stcb) {
4492 					atomic_add_int(&stcb->asoc.refcnt, 1);
4493 					SCTP_TCB_UNLOCK(stcb);
4494 				}
4495 				SCTP_SOCKET_LOCK(so, 1);
4496 				if (stcb) {
4497 					SCTP_TCB_LOCK(stcb);
4498 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4499 				}
4500 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4501 					SCTP_SOCKET_UNLOCK(so, 1);
4502 					return;
4503 				}
4504 			}
4505 #endif
4506 			sctp_sorwakeup(inp, inp->sctp_socket);
4507 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4508 			if (!so_locked) {
4509 				SCTP_SOCKET_UNLOCK(so, 1);
4510 			}
4511 #endif
4512 		}
4513 	}
4514 }
4515 
4516 void
4517 sctp_add_to_readq(struct sctp_inpcb *inp,
4518     struct sctp_tcb *stcb,
4519     struct sctp_queued_to_read *control,
4520     struct sockbuf *sb,
4521     int end,
4522     int inp_read_lock_held,
4523     int so_locked
4524 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4525     SCTP_UNUSED
4526 #endif
4527 )
4528 {
4529 	/*
4530 	 * Here we must place the control on the end of the socket read
4531 	 * queue AND increment sb_cc so that select will work properly on
4532 	 * read.
4533 	 */
4534 	struct mbuf *m, *prev = NULL;
4535 
4536 	if (inp == NULL) {
4537 		/* Gak, TSNH!! */
4538 #ifdef INVARIANTS
4539 		panic("Gak, inp NULL on add_to_readq");
4540 #endif
4541 		return;
4542 	}
4543 	if (inp_read_lock_held == 0)
4544 		SCTP_INP_READ_LOCK(inp);
4545 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4546 		sctp_free_remote_addr(control->whoFrom);
4547 		if (control->data) {
4548 			sctp_m_freem(control->data);
4549 			control->data = NULL;
4550 		}
4551 		sctp_free_a_readq(stcb, control);
4552 		if (inp_read_lock_held == 0)
4553 			SCTP_INP_READ_UNLOCK(inp);
4554 		return;
4555 	}
4556 	if (!(control->spec_flags & M_NOTIFICATION)) {
4557 		atomic_add_int(&inp->total_recvs, 1);
4558 		if (!control->do_not_ref_stcb) {
4559 			atomic_add_int(&stcb->total_recvs, 1);
4560 		}
4561 	}
4562 	m = control->data;
4563 	control->held_length = 0;
4564 	control->length = 0;
4565 	while (m) {
4566 		if (SCTP_BUF_LEN(m) == 0) {
4567 			/* Skip mbufs with NO length */
4568 			if (prev == NULL) {
4569 				/* First one */
4570 				control->data = sctp_m_free(m);
4571 				m = control->data;
4572 			} else {
4573 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4574 				m = SCTP_BUF_NEXT(prev);
4575 			}
4576 			if (m == NULL) {
4577 				control->tail_mbuf = prev;
4578 			}
4579 			continue;
4580 		}
4581 		prev = m;
4582 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4583 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4584 		}
4585 		sctp_sballoc(stcb, sb, m);
4586 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4587 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4588 		}
4589 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4590 		m = SCTP_BUF_NEXT(m);
4591 	}
4592 	if (prev != NULL) {
4593 		control->tail_mbuf = prev;
4594 	} else {
4595 		/* Everything got collapsed out?? */
4596 		sctp_free_remote_addr(control->whoFrom);
4597 		sctp_free_a_readq(stcb, control);
4598 		if (inp_read_lock_held == 0)
4599 			SCTP_INP_READ_UNLOCK(inp);
4600 		return;
4601 	}
4602 	if (end) {
4603 		control->end_added = 1;
4604 	}
4605 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4606 	control->on_read_q = 1;
4607 	if (inp_read_lock_held == 0)
4608 		SCTP_INP_READ_UNLOCK(inp);
4609 	if (inp && inp->sctp_socket) {
4610 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
4611 	}
4612 }
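/*
 * Note the pattern every notification builder earlier in this file
 * follows when handing data to the socket: build an mbuf chain, wrap it
 * in a struct sctp_queued_to_read via sctp_build_readq_entry(), set
 * spec_flags to M_NOTIFICATION, and then call sctp_add_to_readq(), which
 * strips zero-length mbufs, charges the socket buffer via sctp_sballoc(),
 * recomputes control->length, appends the entry to inp->read_queue and
 * finally wakes any reader.
 */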
4613 
4614 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4615  *************ALTERNATE ROUTING CODE
4616  */
4617 
4618 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4619  *************ALTERNATE ROUTING CODE
4620  */
4621 
4622 struct mbuf *
4623 sctp_generate_cause(uint16_t code, char *info)
4624 {
4625 	struct mbuf *m;
4626 	struct sctp_gen_error_cause *cause;
4627 	size_t info_len;
4628 	uint16_t len;
4629 
4630 	if ((code == 0) || (info == NULL)) {
4631 		return (NULL);
4632 	}
4633 	info_len = strlen(info);
4634 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4635 		return (NULL);
4636 	}
4637 	len = (uint16_t) (sizeof(struct sctp_paramhdr) + info_len);
4638 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4639 	if (m != NULL) {
4640 		SCTP_BUF_LEN(m) = len;
4641 		cause = mtod(m, struct sctp_gen_error_cause *);
4642 		cause->code = htons(code);
4643 		cause->length = htons(len);
4644 		memcpy(cause->info, info, info_len);
4645 	}
4646 	return (m);
4647 }
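/*
 * A hedged sketch of how this helper is typically used when aborting an
 * association (the cause code and message here are illustrative, not
 * taken from this file):
 *
 *	struct mbuf *op_err;
 *
 *	op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION,
 *	    "example reason");
 *	sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
 *
 * NULL is returned when code is 0, info is NULL, or the string plus the
 * parameter header would not fit in SCTP_MAX_CAUSE_LENGTH.
 */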
4648 
4649 struct mbuf *
4650 sctp_generate_no_user_data_cause(uint32_t tsn)
4651 {
4652 	struct mbuf *m;
4653 	struct sctp_error_no_user_data *no_user_data_cause;
4654 	uint16_t len;
4655 
4656 	len = (uint16_t) sizeof(struct sctp_error_no_user_data);
4657 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4658 	if (m != NULL) {
4659 		SCTP_BUF_LEN(m) = len;
4660 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4661 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4662 		no_user_data_cause->cause.length = htons(len);
4663 		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4664 	}
4665 	return (m);
4666 }
4667 
4668 #ifdef SCTP_MBCNT_LOGGING
4669 void
4670 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4671     struct sctp_tmit_chunk *tp1, int chk_cnt)
4672 {
4673 	if (tp1->data == NULL) {
4674 		return;
4675 	}
4676 	asoc->chunks_on_out_queue -= chk_cnt;
4677 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4678 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4679 		    asoc->total_output_queue_size,
4680 		    tp1->book_size,
4681 		    0,
4682 		    tp1->mbcnt);
4683 	}
4684 	if (asoc->total_output_queue_size >= tp1->book_size) {
4685 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4686 	} else {
4687 		asoc->total_output_queue_size = 0;
4688 	}
4689 
4690 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4691 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4692 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4693 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4694 		} else {
4695 			stcb->sctp_socket->so_snd.sb_cc = 0;
4696 
4697 		}
4698 	}
4699 }
4700 
4701 #endif
4702 
4703 int
4704 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4705     uint8_t sent, int so_locked
4706 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4707     SCTP_UNUSED
4708 #endif
4709 )
4710 {
4711 	struct sctp_stream_out *strq;
4712 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4713 	struct sctp_stream_queue_pending *sp;
4714 	uint16_t stream = 0, seq = 0;
4715 	uint8_t foundeom = 0;
4716 	int ret_sz = 0;
4717 	int notdone;
4718 	int do_wakeup_routine = 0;
4719 
4720 	stream = tp1->rec.data.stream_number;
4721 	seq = tp1->rec.data.stream_seq;
4722 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4723 		stcb->asoc.abandoned_sent[0]++;
4724 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4725 		stcb->asoc.strmout[stream].abandoned_sent[0]++;
4726 #if defined(SCTP_DETAILED_STR_STATS)
4727 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4728 #endif
4729 	} else {
4730 		stcb->asoc.abandoned_unsent[0]++;
4731 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4732 		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
4733 #if defined(SCTP_DETAILED_STR_STATS)
4734 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4735 #endif
4736 	}
4737 	do {
4738 		ret_sz += tp1->book_size;
4739 		if (tp1->data != NULL) {
4740 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4741 				sctp_flight_size_decrease(tp1);
4742 				sctp_total_flight_decrease(stcb, tp1);
4743 			}
4744 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4745 			stcb->asoc.peers_rwnd += tp1->send_size;
4746 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4747 			if (sent) {
4748 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4749 			} else {
4750 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4751 			}
4752 			if (tp1->data) {
4753 				sctp_m_freem(tp1->data);
4754 				tp1->data = NULL;
4755 			}
4756 			do_wakeup_routine = 1;
4757 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4758 				stcb->asoc.sent_queue_cnt_removeable--;
4759 			}
4760 		}
4761 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4762 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4763 		    SCTP_DATA_NOT_FRAG) {
4764 			/* not frag'ed, we are done */
4765 			notdone = 0;
4766 			foundeom = 1;
4767 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4768 			/* end of frag, we are done */
4769 			notdone = 0;
4770 			foundeom = 1;
4771 		} else {
4772 			/*
4773 			 * It's a begin or middle piece, we must mark all of
4774 			 * it
4775 			 */
4776 			notdone = 1;
4777 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4778 		}
4779 	} while (tp1 && notdone);
4780 	if (foundeom == 0) {
4781 		/*
4782 		 * The multi-part message was scattered across the send and
4783 		 * sent queue.
4784 		 */
4785 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4786 			if ((tp1->rec.data.stream_number != stream) ||
4787 			    (tp1->rec.data.stream_seq != seq)) {
4788 				break;
4789 			}
4790 			/*
4791 			 * save to chk in case we have some on stream out
4792 			 * queue. If so and we have an un-transmitted one we
4793 			 * don't have to fudge the TSN.
4794 			 */
4795 			chk = tp1;
4796 			ret_sz += tp1->book_size;
4797 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4798 			if (sent) {
4799 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4800 			} else {
4801 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4802 			}
4803 			if (tp1->data) {
4804 				sctp_m_freem(tp1->data);
4805 				tp1->data = NULL;
4806 			}
4807 			/* No flight involved here, book the size to 0 */
4808 			tp1->book_size = 0;
4809 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4810 				foundeom = 1;
4811 			}
4812 			do_wakeup_routine = 1;
4813 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4814 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4815 			/*
4816 			 * on to the sent queue so we can wait for it to be
4817 			 * passed by.
4818 			 */
4819 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4820 			    sctp_next);
4821 			stcb->asoc.send_queue_cnt--;
4822 			stcb->asoc.sent_queue_cnt++;
4823 		}
4824 	}
4825 	if (foundeom == 0) {
4826 		/*
4827 		 * Still no eom found. That means there is stuff left on the
4828 		 * stream out queue.. yuck.
4829 		 */
4830 		SCTP_TCB_SEND_LOCK(stcb);
4831 		strq = &stcb->asoc.strmout[stream];
4832 		sp = TAILQ_FIRST(&strq->outqueue);
4833 		if (sp != NULL) {
4834 			sp->discard_rest = 1;
4835 			/*
4836 			 * We may need to put a chunk on the queue that
4837 			 * holds the TSN that would have been sent with the
4838 			 * LAST bit.
4839 			 */
4840 			if (chk == NULL) {
4841 				/* Yep, we have to */
4842 				sctp_alloc_a_chunk(stcb, chk);
4843 				if (chk == NULL) {
4844 					/*
4845 					 * we are hosed. All we can do is
4846 					 * nothing.. which will cause an
4847 					 * abort if the peer is paying
4848 					 * attention.
4849 					 */
4850 					goto oh_well;
4851 				}
4852 				memset(chk, 0, sizeof(*chk));
4853 				chk->rec.data.rcv_flags = 0;
4854 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4855 				chk->asoc = &stcb->asoc;
4856 				if (stcb->asoc.idata_supported == 0) {
4857 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4858 						chk->rec.data.stream_seq = 0;
4859 					} else {
4860 						chk->rec.data.stream_seq = strq->next_mid_ordered;
4861 					}
4862 				} else {
4863 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4864 						chk->rec.data.stream_seq = strq->next_mid_unordered;
4865 					} else {
4866 						chk->rec.data.stream_seq = strq->next_mid_ordered;
4867 					}
4868 				}
4869 				chk->rec.data.stream_number = sp->stream;
4870 				chk->rec.data.payloadtype = sp->ppid;
4871 				chk->rec.data.context = sp->context;
4872 				chk->flags = sp->act_flags;
4873 				chk->whoTo = NULL;
4874 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4875 				strq->chunks_on_queues++;
4876 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4877 				stcb->asoc.sent_queue_cnt++;
4878 				stcb->asoc.pr_sctp_cnt++;
4879 			}
4880 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4881 			if (stcb->asoc.idata_supported == 0) {
4882 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4883 					strq->next_mid_ordered++;
4884 				}
4885 			} else {
4886 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4887 					strq->next_mid_unordered++;
4888 				} else {
4889 					strq->next_mid_ordered++;
4890 				}
4891 			}
4892 	oh_well:
4893 			if (sp->data) {
4894 				/*
4895 				 * Pull any data to free up the SB and allow
4896 				 * sender to "add more" while we will throw
4897 				 * away :-)
4898 				 */
4899 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4900 				ret_sz += sp->length;
4901 				do_wakeup_routine = 1;
4902 				sp->some_taken = 1;
4903 				sctp_m_freem(sp->data);
4904 				sp->data = NULL;
4905 				sp->tail_mbuf = NULL;
4906 				sp->length = 0;
4907 			}
4908 		}
4909 		SCTP_TCB_SEND_UNLOCK(stcb);
4910 	}
4911 	if (do_wakeup_routine) {
4912 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4913 		struct socket *so;
4914 
4915 		so = SCTP_INP_SO(stcb->sctp_ep);
4916 		if (!so_locked) {
4917 			atomic_add_int(&stcb->asoc.refcnt, 1);
4918 			SCTP_TCB_UNLOCK(stcb);
4919 			SCTP_SOCKET_LOCK(so, 1);
4920 			SCTP_TCB_LOCK(stcb);
4921 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4922 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4923 				/* assoc was freed while we were unlocked */
4924 				SCTP_SOCKET_UNLOCK(so, 1);
4925 				return (ret_sz);
4926 			}
4927 		}
4928 #endif
4929 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4930 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4931 		if (!so_locked) {
4932 			SCTP_SOCKET_UNLOCK(so, 1);
4933 		}
4934 #endif
4935 	}
4936 	return (ret_sz);
4937 }
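/*
 * The PR-SCTP abandon path above chases a partially transmitted message
 * across up to three places: the sent queue (starting at tp1), the
 * not-yet-sent send queue, and finally the per-stream output queue.
 * Every fragment found is marked SCTP_FORWARD_TSN_SKIP so a later
 * FORWARD-TSN can cover it, and if no LAST fragment exists yet a
 * placeholder chunk is allocated to carry the TSN that would have ended
 * the message.
 */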
4938 
4939 /*
4940  * checks to see if the given address, sa, is one that is currently known by
4941  * the kernel. Note: it can't distinguish the same address on multiple interfaces
4942  * and doesn't handle multiple addresses with different zone/scope ids. Note:
4943  * ifa_ifwithaddr() compares the entire sockaddr struct.
4944  */
4945 struct sctp_ifa *
4946 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4947     int holds_lock)
4948 {
4949 	struct sctp_laddr *laddr;
4950 
4951 	if (holds_lock == 0) {
4952 		SCTP_INP_RLOCK(inp);
4953 	}
4954 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4955 		if (laddr->ifa == NULL)
4956 			continue;
4957 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4958 			continue;
4959 #ifdef INET
4960 		if (addr->sa_family == AF_INET) {
4961 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4962 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4963 				/* found him. */
4964 				if (holds_lock == 0) {
4965 					SCTP_INP_RUNLOCK(inp);
4966 				}
4967 				return (laddr->ifa);
4968 				break;
4969 			}
4970 		}
4971 #endif
4972 #ifdef INET6
4973 		if (addr->sa_family == AF_INET6) {
4974 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4975 			    &laddr->ifa->address.sin6)) {
4976 				/* found him. */
4977 				if (holds_lock == 0) {
4978 					SCTP_INP_RUNLOCK(inp);
4979 				}
4980 				return (laddr->ifa);
4981 				break;
4982 			}
4983 		}
4984 #endif
4985 	}
4986 	if (holds_lock == 0) {
4987 		SCTP_INP_RUNLOCK(inp);
4988 	}
4989 	return (NULL);
4990 }
4991 
4992 uint32_t
4993 sctp_get_ifa_hash_val(struct sockaddr *addr)
4994 {
4995 	switch (addr->sa_family) {
4996 #ifdef INET
4997 	case AF_INET:
4998 		{
4999 			struct sockaddr_in *sin;
5000 
5001 			sin = (struct sockaddr_in *)addr;
5002 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5003 		}
5004 #endif
5005 #ifdef INET6
5006 	case AF_INET6:
5007 		{
5008 			struct sockaddr_in6 *sin6;
5009 			uint32_t hash_of_addr;
5010 
5011 			sin6 = (struct sockaddr_in6 *)addr;
5012 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5013 			    sin6->sin6_addr.s6_addr32[1] +
5014 			    sin6->sin6_addr.s6_addr32[2] +
5015 			    sin6->sin6_addr.s6_addr32[3]);
5016 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5017 			return (hash_of_addr);
5018 		}
5019 #endif
5020 	default:
5021 		break;
5022 	}
5023 	return (0);
5024 }
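/*
 * The hash simply folds the upper 16 bits of the address (for IPv6, of
 * the sum of its four 32-bit words) into the lower 16 bits.  For
 * example, an IPv4 s_addr of 0x12345678 hashes to
 * 0x12345678 ^ 0x00001234 = 0x1234444c; the caller below then masks the
 * result with vrf_addr_hashmark to select a bucket.
 */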
5025 
5026 struct sctp_ifa *
5027 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5028 {
5029 	struct sctp_ifa *sctp_ifap;
5030 	struct sctp_vrf *vrf;
5031 	struct sctp_ifalist *hash_head;
5032 	uint32_t hash_of_addr;
5033 
5034 	if (holds_lock == 0)
5035 		SCTP_IPI_ADDR_RLOCK();
5036 
5037 	vrf = sctp_find_vrf(vrf_id);
5038 	if (vrf == NULL) {
5039 		if (holds_lock == 0)
5040 			SCTP_IPI_ADDR_RUNLOCK();
5041 		return (NULL);
5042 	}
5043 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5044 
5045 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5046 	if (hash_head == NULL) {
5047 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5048 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5049 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5050 		sctp_print_address(addr);
5051 		SCTP_PRINTF("No such bucket for address\n");
5052 		if (holds_lock == 0)
5053 			SCTP_IPI_ADDR_RUNLOCK();
5054 
5055 		return (NULL);
5056 	}
5057 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5058 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5059 			continue;
5060 #ifdef INET
5061 		if (addr->sa_family == AF_INET) {
5062 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5063 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5064 				/* found him. */
5065 				if (holds_lock == 0)
5066 					SCTP_IPI_ADDR_RUNLOCK();
5067 				return (sctp_ifap);
5068 				break;
5069 			}
5070 		}
5071 #endif
5072 #ifdef INET6
5073 		if (addr->sa_family == AF_INET6) {
5074 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5075 			    &sctp_ifap->address.sin6)) {
5076 				/* found him. */
5077 				if (holds_lock == 0)
5078 					SCTP_IPI_ADDR_RUNLOCK();
5079 				return (sctp_ifap);
5080 				break;
5081 			}
5082 		}
5083 #endif
5084 	}
5085 	if (holds_lock == 0)
5086 		SCTP_IPI_ADDR_RUNLOCK();
5087 	return (NULL);
5088 }
5089 
5090 static void
5091 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
5092     uint32_t rwnd_req)
5093 {
5094 	/* User pulled some data, do we need a rwnd update? */
5095 	int r_unlocked = 0;
5096 	uint32_t dif, rwnd;
5097 	struct socket *so = NULL;
5098 
5099 	if (stcb == NULL)
5100 		return;
5101 
5102 	atomic_add_int(&stcb->asoc.refcnt, 1);
5103 
5104 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5105 	    SCTP_STATE_SHUTDOWN_RECEIVED |
5106 	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5107 		/* Pre-check: if we are freeing, no update is needed */
5108 		goto no_lock;
5109 	}
5110 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5111 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5112 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5113 		goto out;
5114 	}
5115 	so = stcb->sctp_socket;
5116 	if (so == NULL) {
5117 		goto out;
5118 	}
5119 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5120 	/* Have you have freed enough to look */
5121 	/* Have you freed enough to warrant a look? */
5122 	*freed_so_far = 0;
5123 	/* Yep, it's worth a look and the lock overhead */
5124 	/* Figure out what the rwnd would be */
5125 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5126 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5127 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5128 	} else {
5129 		dif = 0;
5130 	}
5131 	if (dif >= rwnd_req) {
5132 		if (hold_rlock) {
5133 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5134 			r_unlocked = 1;
5135 		}
5136 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5137 			/*
5138 			 * One last check before we allow the guy possibly
5139 			 * One last check before we possibly let the update
5140 			 * proceed. There is a race where the association may be
5141 			 * about to be freed; in that case, bail out.
5142 			goto out;
5143 		}
5144 		SCTP_TCB_LOCK(stcb);
5145 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5146 			/* No reports here */
5147 			SCTP_TCB_UNLOCK(stcb);
5148 			goto out;
5149 		}
5150 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5151 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5152 
5153 		sctp_chunk_output(stcb->sctp_ep, stcb,
5154 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5155 		/* make sure no timer is running */
5156 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5157 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5158 		SCTP_TCB_UNLOCK(stcb);
5159 	} else {
5160 		/* Update how much we have pending */
5161 		stcb->freed_by_sorcv_sincelast = dif;
5162 	}
5163 out:
5164 	if (so && r_unlocked && hold_rlock) {
5165 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5166 	}
5167 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5168 no_lock:
5169 	atomic_add_int(&stcb->asoc.refcnt, -1);
5170 	return;
5171 }
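/*
 * sctp_user_rcvd() is the receive-window update hook used by
 * sctp_sorecvmsg() below.  Once the application has consumed enough data
 * that the recomputed rwnd exceeds the last value reported to the peer
 * by at least rwnd_req (a fraction of the receive socket buffer,
 * computed by the caller), a window-update SACK is sent right away;
 * otherwise the amount freed so far is just remembered for a later check.
 */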
5172 
5173 int
5174 sctp_sorecvmsg(struct socket *so,
5175     struct uio *uio,
5176     struct mbuf **mp,
5177     struct sockaddr *from,
5178     int fromlen,
5179     int *msg_flags,
5180     struct sctp_sndrcvinfo *sinfo,
5181     int filling_sinfo)
5182 {
5183 	/*
5184 	 * MSG flags we will look at: MSG_DONTWAIT - non-blocking I/O;
5185 	 * MSG_PEEK - look, don't touch :-D (only valid with an out-mbuf copy,
5186 	 * i.e. mp == NULL, thus uio is the copy method to userland); MSG_WAITALL - ??
5187 	 * On the way out we may set any combination of
5188 	 * MSG_NOTIFICATION and MSG_EOR.
5189 	 *
5190 	 */
5191 	struct sctp_inpcb *inp = NULL;
5192 	int my_len = 0;
5193 	int cp_len = 0, error = 0;
5194 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5195 	struct mbuf *m = NULL;
5196 	struct sctp_tcb *stcb = NULL;
5197 	int wakeup_read_socket = 0;
5198 	int freecnt_applied = 0;
5199 	int out_flags = 0, in_flags = 0;
5200 	int block_allowed = 1;
5201 	uint32_t freed_so_far = 0;
5202 	uint32_t copied_so_far = 0;
5203 	int in_eeor_mode = 0;
5204 	int no_rcv_needed = 0;
5205 	uint32_t rwnd_req = 0;
5206 	int hold_sblock = 0;
5207 	int hold_rlock = 0;
5208 	ssize_t slen = 0;
5209 	uint32_t held_length = 0;
5210 	int sockbuf_lock = 0;
5211 
5212 	if (uio == NULL) {
5213 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5214 		return (EINVAL);
5215 	}
5216 	if (msg_flags) {
5217 		in_flags = *msg_flags;
5218 		if (in_flags & MSG_PEEK)
5219 			SCTP_STAT_INCR(sctps_read_peeks);
5220 	} else {
5221 		in_flags = 0;
5222 	}
5223 	slen = uio->uio_resid;
5224 
5225 	/* Pull in and set up our int flags */
5226 	if (in_flags & MSG_OOB) {
5227 		/* Out of band data is NOT supported */
5228 		return (EOPNOTSUPP);
5229 	}
5230 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5231 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5232 		return (EINVAL);
5233 	}
5234 	if ((in_flags & (MSG_DONTWAIT | MSG_NBIO)) ||
5237 	    SCTP_SO_IS_NBIO(so)) {
5238 		block_allowed = 0;
5239 	}
5240 	/* setup the endpoint */
5241 	inp = (struct sctp_inpcb *)so->so_pcb;
5242 	if (inp == NULL) {
5243 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5244 		return (EFAULT);
5245 	}
5246 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5247 	/* Must be at least an MTU's worth */
5248 	if (rwnd_req < SCTP_MIN_RWND)
5249 		rwnd_req = SCTP_MIN_RWND;
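	/*
	 * rwnd_req is how much the receive window must open up before it is
	 * worth telling the peer via a window-update SACK (see
	 * sctp_user_rcvd()).
	 */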
5250 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5251 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5252 		sctp_misc_ints(SCTP_SORECV_ENTER,
5253 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t) uio->uio_resid);
5254 	}
5255 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5256 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5257 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t) uio->uio_resid);
5258 	}
5259 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5260 	if (error) {
5261 		goto release_unlocked;
5262 	}
5263 	sockbuf_lock = 1;
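	/*
	 * We now hold the socket buffer's I/O lock (sblock); this keeps
	 * other readers out of so_rcv until we sbunlock() on the way out.
	 */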
5264 restart:
5265 
5266 
5267 restart_nosblocks:
5268 	if (hold_sblock == 0) {
5269 		SOCKBUF_LOCK(&so->so_rcv);
5270 		hold_sblock = 1;
5271 	}
5272 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5273 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5274 		goto out;
5275 	}
5276 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5277 		if (so->so_error) {
5278 			error = so->so_error;
5279 			if ((in_flags & MSG_PEEK) == 0)
5280 				so->so_error = 0;
5281 			goto out;
5282 		} else {
5283 			if (so->so_rcv.sb_cc == 0) {
5284 				/* indicate EOF */
5285 				error = 0;
5286 				goto out;
5287 			}
5288 		}
5289 	}
5290 	if (so->so_rcv.sb_cc <= held_length) {
5291 		if (so->so_error) {
5292 			error = so->so_error;
5293 			if ((in_flags & MSG_PEEK) == 0) {
5294 				so->so_error = 0;
5295 			}
5296 			goto out;
5297 		}
5298 		if ((so->so_rcv.sb_cc == 0) &&
5299 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5300 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5301 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5302 				/*
5303 				 * For the active open side, clear the flags
5304 				 * for re-use; the passive open side is
5305 				 * blocked by connect.
5306 				 */
5307 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5308 					/*
5309 					 * You were aborted, passive side
5310 					 * always hits here
5311 					 */
5312 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5313 					error = ECONNRESET;
5314 				}
5315 				so->so_state &= ~(SS_ISCONNECTING |
5316 				    SS_ISDISCONNECTING |
5317 				    SS_ISCONFIRMING |
5318 				    SS_ISCONNECTED);
5319 				if (error == 0) {
5320 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5321 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5322 						error = ENOTCONN;
5323 					}
5324 				}
5325 				goto out;
5326 			}
5327 		}
5328 		if (block_allowed) {
5329 			error = sbwait(&so->so_rcv);
5330 			if (error) {
5331 				goto out;
5332 			}
5333 			held_length = 0;
5334 			goto restart_nosblocks;
5335 		} else {
5336 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5337 			error = EWOULDBLOCK;
5338 			goto out;
5339 		}
5340 	}
5341 	if (hold_sblock == 1) {
5342 		SOCKBUF_UNLOCK(&so->so_rcv);
5343 		hold_sblock = 0;
5344 	}
5345 	/* we possibly have data we can read */
5346 	/* sa_ignore FREED_MEMORY */
5347 	control = TAILQ_FIRST(&inp->read_queue);
5348 	if (control == NULL) {
5349 		/*
5350 		 * This could be happening since the appender did the
5351 		 * increment but has not yet done the tailq insert onto
5352 		 * the read_queue.
5353 		 */
5354 		if (hold_rlock == 0) {
5355 			SCTP_INP_READ_LOCK(inp);
5356 		}
5357 		control = TAILQ_FIRST(&inp->read_queue);
5358 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5359 #ifdef INVARIANTS
5360 			panic("Huh, its non zero and nothing on control?");
5361 #endif
5362 			so->so_rcv.sb_cc = 0;
5363 		}
5364 		SCTP_INP_READ_UNLOCK(inp);
5365 		hold_rlock = 0;
5366 		goto restart;
5367 	}
5368 	if ((control->length == 0) &&
5369 	    (control->do_not_ref_stcb)) {
5370 		/*
5371 		 * Clean-up code for freeing an assoc that left behind a
5372 		 * pdapi.. maybe a peer in EEOR mode that just closed
5373 		 * after sending and never indicated an EOR.
5374 		 */
5375 		if (hold_rlock == 0) {
5376 			hold_rlock = 1;
5377 			SCTP_INP_READ_LOCK(inp);
5378 		}
5379 		control->held_length = 0;
5380 		if (control->data) {
5381 			/* Hmm there is data here .. fix */
5382 			struct mbuf *m_tmp;
5383 			int cnt = 0;
5384 
5385 			m_tmp = control->data;
5386 			while (m_tmp) {
5387 				cnt += SCTP_BUF_LEN(m_tmp);
5388 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5389 					control->tail_mbuf = m_tmp;
5390 					control->end_added = 1;
5391 				}
5392 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5393 			}
5394 			control->length = cnt;
5395 		} else {
5396 			/* remove it */
5397 			TAILQ_REMOVE(&inp->read_queue, control, next);
5398 			/* Add back any hidden data */
5399 			sctp_free_remote_addr(control->whoFrom);
5400 			sctp_free_a_readq(stcb, control);
5401 		}
5402 		if (hold_rlock) {
5403 			hold_rlock = 0;
5404 			SCTP_INP_READ_UNLOCK(inp);
5405 		}
5406 		goto restart;
5407 	}
5408 	if ((control->length == 0) &&
5409 	    (control->end_added == 1)) {
5410 		/*
5411 		 * Do we also need to check for (control->pdapi_aborted ==
5412 		 * 1)?
5413 		 */
5414 		if (hold_rlock == 0) {
5415 			hold_rlock = 1;
5416 			SCTP_INP_READ_LOCK(inp);
5417 		}
5418 		TAILQ_REMOVE(&inp->read_queue, control, next);
5419 		if (control->data) {
5420 #ifdef INVARIANTS
5421 			panic("control->data not null but control->length == 0");
5422 #else
5423 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5424 			sctp_m_freem(control->data);
5425 			control->data = NULL;
5426 #endif
5427 		}
5428 		if (control->aux_data) {
5429 			sctp_m_free(control->aux_data);
5430 			control->aux_data = NULL;
5431 		}
5432 #ifdef INVARIANTS
5433 		if (control->on_strm_q) {
5434 			panic("About to free ctl:%p so:%p and its in %d",
5435 			    control, so, control->on_strm_q);
5436 		}
5437 #endif
5438 		sctp_free_remote_addr(control->whoFrom);
5439 		sctp_free_a_readq(stcb, control);
5440 		if (hold_rlock) {
5441 			hold_rlock = 0;
5442 			SCTP_INP_READ_UNLOCK(inp);
5443 		}
5444 		goto restart;
5445 	}
5446 	if (control->length == 0) {
5447 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5448 		    (filling_sinfo)) {
5449 			/* find a more suitable one than this */
5450 			ctl = TAILQ_NEXT(control, next);
5451 			while (ctl) {
5452 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5453 				    (ctl->some_taken ||
5454 				    (ctl->spec_flags & M_NOTIFICATION) ||
5455 				    ((ctl->do_not_ref_stcb == 0) &&
5456 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5457 				    ) {
5458 					/*-
5459 					 * If we have a different TCB next, and there is data
5460 					 * present, and either we have already taken some (pdapi),
5461 					 * OR we can ref the tcb and no delivery has started on this
5462 					 * stream, we take it. Note we allow a notification on a
5463 					 * different assoc to be delivered.
5464 					 */
5465 					control = ctl;
5466 					goto found_one;
5467 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5468 					    (ctl->length) &&
5469 					    ((ctl->some_taken) ||
5470 					    ((ctl->do_not_ref_stcb == 0) &&
5471 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5472 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5473 					/*-
5474 					 * If we have the same tcb, and there is data present, and we
5475 					 * have the stream interleave feature present, then if we have
5476 					 * taken some (pdapi) or we can refer to that tcb AND we have
5477 					 * not started a delivery for this stream, we can take it.
5478 					 * Note we do NOT allow a notification on the same assoc to
5479 					 * be delivered.
5480 					 */
5481 					control = ctl;
5482 					goto found_one;
5483 				}
5484 				ctl = TAILQ_NEXT(ctl, next);
5485 			}
5486 		}
5487 		/*
5488 		 * if we reach here, no suitable replacement is available
5489 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5490 		 * into our held count, and it's time to sleep again.
5491 		 */
5492 		held_length = so->so_rcv.sb_cc;
5493 		control->held_length = so->so_rcv.sb_cc;
5494 		goto restart;
5495 	}
5496 	/* Clear the held length since there is something to read */
5497 	control->held_length = 0;
5498 	if (hold_rlock) {
5499 		SCTP_INP_READ_UNLOCK(inp);
5500 		hold_rlock = 0;
5501 	}
5502 found_one:
5503 	/*
5504 	 * If we reach here, control has some data for us to read off.
5505 	 * Note that stcb COULD be NULL.
5506 	 */
5507 	control->some_taken++;
5508 	if (hold_sblock) {
5509 		SOCKBUF_UNLOCK(&so->so_rcv);
5510 		hold_sblock = 0;
5511 	}
5512 	stcb = control->stcb;
5513 	if (stcb) {
5514 		if ((control->do_not_ref_stcb == 0) &&
5515 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5516 			if (freecnt_applied == 0)
5517 				stcb = NULL;
5518 		} else if (control->do_not_ref_stcb == 0) {
5519 			/* you can't free it on me please */
5520 			/*
5521 			 * The lock on the socket buffer protects us so the
5522 			 * free code will stop. But since we used the
5523 			 * socketbuf lock and the sender uses the tcb_lock
5524 			 * to increment, we need to use the atomic add to
5525 			 * the refcnt
5526 			 */
5527 			if (freecnt_applied) {
5528 #ifdef INVARIANTS
5529 				panic("refcnt already incremented");
5530 #else
5531 				SCTP_PRINTF("refcnt already incremented?\n");
5532 #endif
5533 			} else {
5534 				atomic_add_int(&stcb->asoc.refcnt, 1);
5535 				freecnt_applied = 1;
5536 			}
5537 			/*
5538 			 * Set up to remember how much we have not yet told
5539 			 * the peer our rwnd has opened up. Note we grab the
5540 			 * value from the tcb from last time. Note too that
5541 			 * sack sending clears this when a sack is sent,
5542 			 * which is fine. Once we hit the rwnd_req, we then
5543 			 * will go to the sctp_user_rcvd() that will not
5544 			 * lock until it KNOWs it MUST send a WUP-SACK.
5545 			 */
5546 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5547 			stcb->freed_by_sorcv_sincelast = 0;
5548 		}
5549 	}
5550 	if (stcb &&
5551 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5552 	    control->do_not_ref_stcb == 0) {
5553 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5554 	}
5555 	/* First let's copy off the sinfo and sockaddr info */
5556 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5557 		sinfo->sinfo_stream = control->sinfo_stream;
5558 		sinfo->sinfo_ssn = (uint16_t) control->sinfo_ssn;
5559 		sinfo->sinfo_flags = control->sinfo_flags;
5560 		sinfo->sinfo_ppid = control->sinfo_ppid;
5561 		sinfo->sinfo_context = control->sinfo_context;
5562 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5563 		sinfo->sinfo_tsn = control->sinfo_tsn;
5564 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5565 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5566 		nxt = TAILQ_NEXT(control, next);
5567 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5568 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5569 			struct sctp_extrcvinfo *s_extra;
5570 
5571 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5572 			if ((nxt) &&
5573 			    (nxt->length)) {
5574 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5575 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5576 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5577 				}
5578 				if (nxt->spec_flags & M_NOTIFICATION) {
5579 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5580 				}
5581 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5582 				s_extra->serinfo_next_length = nxt->length;
5583 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5584 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5585 				if (nxt->tail_mbuf != NULL) {
5586 					if (nxt->end_added) {
5587 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5588 					}
5589 				}
5590 			} else {
5591 				/*
5592 				 * we explicitly zero these, since the memcpy
5593 				 * got some other things beyond the older
5594 				 * sinfo_ fields that are on the control
5595 				 * structure :-D
5596 				 */
5597 				nxt = NULL;
5598 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5599 				s_extra->serinfo_next_aid = 0;
5600 				s_extra->serinfo_next_length = 0;
5601 				s_extra->serinfo_next_ppid = 0;
5602 				s_extra->serinfo_next_stream = 0;
5603 			}
5604 		}
5605 		/*
5606 		 * update off the real current cum-ack, if we have an stcb.
5607 		 */
5608 		if ((control->do_not_ref_stcb == 0) && stcb)
5609 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5610 		/*
5611 		 * mask off the high bits, we keep the actual chunk bits in
5612 		 * there.
5613 		 */
5614 		sinfo->sinfo_flags &= 0x00ff;
5615 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5616 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5617 		}
5618 	}
5619 #ifdef SCTP_ASOCLOG_OF_TSNS
5620 	{
5621 		int index, newindex;
5622 		struct sctp_pcbtsn_rlog *entry;
5623 
5624 		do {
5625 			index = inp->readlog_index;
5626 			newindex = index + 1;
5627 			if (newindex >= SCTP_READ_LOG_SIZE) {
5628 				newindex = 0;
5629 			}
5630 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5631 		entry = &inp->readlog[index];
5632 		entry->vtag = control->sinfo_assoc_id;
5633 		entry->strm = control->sinfo_stream;
5634 		entry->seq = control->sinfo_ssn;
5635 		entry->sz = control->length;
5636 		entry->flgs = control->sinfo_flags;
5637 	}
5638 #endif
5639 	if ((fromlen > 0) && (from != NULL)) {
5640 		union sctp_sockstore store;
5641 		size_t len;
5642 
5643 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5644 #ifdef INET6
5645 		case AF_INET6:
5646 			len = sizeof(struct sockaddr_in6);
5647 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5648 			store.sin6.sin6_port = control->port_from;
5649 			break;
5650 #endif
5651 #ifdef INET
5652 		case AF_INET:
5653 #ifdef INET6
5654 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5655 				len = sizeof(struct sockaddr_in6);
5656 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5657 				    &store.sin6);
5658 				store.sin6.sin6_port = control->port_from;
5659 			} else {
5660 				len = sizeof(struct sockaddr_in);
5661 				store.sin = control->whoFrom->ro._l_addr.sin;
5662 				store.sin.sin_port = control->port_from;
5663 			}
5664 #else
5665 			len = sizeof(struct sockaddr_in);
5666 			store.sin = control->whoFrom->ro._l_addr.sin;
5667 			store.sin.sin_port = control->port_from;
5668 #endif
5669 			break;
5670 #endif
5671 		default:
5672 			len = 0;
5673 			break;
5674 		}
5675 		memcpy(from, &store, min((size_t)fromlen, len));
5676 #ifdef INET6
5677 		{
5678 			struct sockaddr_in6 lsa6, *from6;
5679 
5680 			from6 = (struct sockaddr_in6 *)from;
5681 			sctp_recover_scope_mac(from6, (&lsa6));
5682 		}
5683 #endif
5684 	}
5685 	/* now copy out what data we can */
5686 	if (mp == NULL) {
5687 		/* copy out each mbuf in the chain up to length */
5688 get_more_data:
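		/*
		 * Copy the message to userland mbuf by mbuf. Unless
		 * MSG_PEEK is set, each fully consumed mbuf is freed and
		 * its bytes are counted toward re-opening the receive
		 * window.
		 */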
5689 		m = control->data;
5690 		while (m) {
5691 			/* Move out all we can */
5692 			cp_len = (int)uio->uio_resid;
5693 			my_len = (int)SCTP_BUF_LEN(m);
5694 			if (cp_len > my_len) {
5695 				/* not enough in this buf */
5696 				cp_len = my_len;
5697 			}
5698 			if (hold_rlock) {
5699 				SCTP_INP_READ_UNLOCK(inp);
5700 				hold_rlock = 0;
5701 			}
5702 			if (cp_len > 0)
5703 				error = uiomove(mtod(m, char *), cp_len, uio);
5704 			/* re-read */
5705 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5706 				goto release;
5707 			}
5708 			if ((control->do_not_ref_stcb == 0) && stcb &&
5709 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5710 				no_rcv_needed = 1;
5711 			}
5712 			if (error) {
5713 				/* error we are out of here */
5714 				goto release;
5715 			}
5716 			SCTP_INP_READ_LOCK(inp);
5717 			hold_rlock = 1;
5718 			if (cp_len == SCTP_BUF_LEN(m)) {
5719 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5720 				    (control->end_added)) {
5721 					out_flags |= MSG_EOR;
5722 					if ((control->do_not_ref_stcb == 0) &&
5723 					    (control->stcb != NULL) &&
5724 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5725 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5726 				}
5727 				if (control->spec_flags & M_NOTIFICATION) {
5728 					out_flags |= MSG_NOTIFICATION;
5729 				}
5730 				/* we ate up the mbuf */
5731 				if (in_flags & MSG_PEEK) {
5732 					/* just looking */
5733 					m = SCTP_BUF_NEXT(m);
5734 					copied_so_far += cp_len;
5735 				} else {
5736 					/* dispose of the mbuf */
5737 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5738 						sctp_sblog(&so->so_rcv,
5739 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5740 					}
5741 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5742 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5743 						sctp_sblog(&so->so_rcv,
5744 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5745 					}
5746 					copied_so_far += cp_len;
5747 					freed_so_far += cp_len;
5748 					freed_so_far += MSIZE;
5749 					atomic_subtract_int(&control->length, cp_len);
5750 					control->data = sctp_m_free(m);
5751 					m = control->data;
5752 					/*
5753 					 * been through it all; must hold the
5754 					 * sb lock, so ok to NULL the tail
5755 					 */
5756 					if (control->data == NULL) {
5757 #ifdef INVARIANTS
5758 						if ((control->end_added == 0) ||
5759 						    (TAILQ_NEXT(control, next) == NULL)) {
5760 							/*
5761 							 * If the end is not
5762 							 * added, OR the
5763 							 * next is NOT null
5764 							 * we MUST have the
5765 							 * lock.
5766 							 */
5767 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5768 								panic("Hmm we don't own the lock?");
5769 							}
5770 						}
5771 #endif
5772 						control->tail_mbuf = NULL;
5773 #ifdef INVARIANTS
5774 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5775 							panic("end_added, nothing left and no MSG_EOR");
5776 						}
5777 #endif
5778 					}
5779 				}
5780 			} else {
5781 				/* Do we need to trim the mbuf? */
5782 				if (control->spec_flags & M_NOTIFICATION) {
5783 					out_flags |= MSG_NOTIFICATION;
5784 				}
5785 				if ((in_flags & MSG_PEEK) == 0) {
5786 					SCTP_BUF_RESV_UF(m, cp_len);
5787 					SCTP_BUF_LEN(m) -= cp_len;
5788 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5789 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5790 					}
5791 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5792 					if ((control->do_not_ref_stcb == 0) &&
5793 					    stcb) {
5794 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5795 					}
5796 					copied_so_far += cp_len;
5797 					freed_so_far += cp_len;
5798 					freed_so_far += MSIZE;
5799 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5800 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5801 						    SCTP_LOG_SBRESULT, 0);
5802 					}
5803 					atomic_subtract_int(&control->length, cp_len);
5804 				} else {
5805 					copied_so_far += cp_len;
5806 				}
5807 			}
5808 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5809 				break;
5810 			}
5811 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5812 			    (control->do_not_ref_stcb == 0) &&
5813 			    (freed_so_far >= rwnd_req)) {
5814 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5815 			}
5816 		}		/* end while(m) */
5817 		/*
5818 		 * At this point we have looked at it all and we either have
5819 		 * MSG_EOR, or have read all the user wants... <OR>
5820 		 * control->length == 0.
5821 		 */
5822 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5823 			/* we are done with this control */
5824 			if (control->length == 0) {
5825 				if (control->data) {
5826 #ifdef INVARIANTS
5827 					panic("control->data not null at read eor?");
5828 #else
5829 					SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n");
5830 					sctp_m_freem(control->data);
5831 					control->data = NULL;
5832 #endif
5833 				}
5834 		done_with_control:
5835 				if (hold_rlock == 0) {
5836 					SCTP_INP_READ_LOCK(inp);
5837 					hold_rlock = 1;
5838 				}
5839 				TAILQ_REMOVE(&inp->read_queue, control, next);
5840 				/* Add back any hidden data */
5841 				if (control->held_length) {
5842 					held_length = 0;
5843 					control->held_length = 0;
5844 					wakeup_read_socket = 1;
5845 				}
5846 				if (control->aux_data) {
5847 					sctp_m_free(control->aux_data);
5848 					control->aux_data = NULL;
5849 				}
5850 				no_rcv_needed = control->do_not_ref_stcb;
5851 				sctp_free_remote_addr(control->whoFrom);
5852 				control->data = NULL;
5853 #ifdef INVARIANTS
5854 				if (control->on_strm_q) {
5855 					panic("About to free ctl:%p so:%p and its in %d",
5856 					    control, so, control->on_strm_q);
5857 				}
5858 #endif
5859 				sctp_free_a_readq(stcb, control);
5860 				control = NULL;
5861 				if ((freed_so_far >= rwnd_req) &&
5862 				    (no_rcv_needed == 0))
5863 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5864 
5865 			} else {
5866 				/*
5867 				 * The user did not read all of this
5868 				 * message, turn off the returned MSG_EOR
5869 				 * since we are leaving more behind on the
5870 				 * control to read.
5871 				 */
5872 #ifdef INVARIANTS
5873 				if (control->end_added &&
5874 				    (control->data == NULL) &&
5875 				    (control->tail_mbuf == NULL)) {
5876 					panic("Gak, control->length is corrupt?");
5877 				}
5878 #endif
5879 				no_rcv_needed = control->do_not_ref_stcb;
5880 				out_flags &= ~MSG_EOR;
5881 			}
5882 		}
5883 		if (out_flags & MSG_EOR) {
5884 			goto release;
5885 		}
5886 		if ((uio->uio_resid == 0) ||
5887 		    ((in_eeor_mode) &&
5888 		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5889 			goto release;
5890 		}
5891 		/*
5892 		 * If we hit here, the receiver wants more and this message
5893 		 * is NOT done (pd-api). So two questions: can we block? If
5894 		 * not, we are done. Did the user NOT set MSG_WAITALL?
5895 		 */
5896 		if (block_allowed == 0) {
5897 			goto release;
5898 		}
5899 		/*
5900 		 * We need to wait for more data. A few things:
5901 		 * - We don't sbunlock(), so no one else can start reading.
5902 		 * - We must be sure to account for the case where what is
5903 		 *   added is NOT for our control when we wake up.
5904 		 */
5905 
5906 		/*
5907 		 * Do we need to tell the transport a rwnd update might be
5908 		 * needed before we go to sleep?
5909 		 */
5910 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5911 		    ((freed_so_far >= rwnd_req) &&
5912 		    (control->do_not_ref_stcb == 0) &&
5913 		    (no_rcv_needed == 0))) {
5914 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5915 		}
5916 wait_some_more:
5917 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5918 			goto release;
5919 		}
5920 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5921 			goto release;
5922 
5923 		if (hold_rlock == 1) {
5924 			SCTP_INP_READ_UNLOCK(inp);
5925 			hold_rlock = 0;
5926 		}
5927 		if (hold_sblock == 0) {
5928 			SOCKBUF_LOCK(&so->so_rcv);
5929 			hold_sblock = 1;
5930 		}
5931 		if ((copied_so_far) && (control->length == 0) &&
5932 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5933 			goto release;
5934 		}
5935 		if (so->so_rcv.sb_cc <= control->held_length) {
5936 			error = sbwait(&so->so_rcv);
5937 			if (error) {
5938 				goto release;
5939 			}
5940 			control->held_length = 0;
5941 		}
5942 		if (hold_sblock) {
5943 			SOCKBUF_UNLOCK(&so->so_rcv);
5944 			hold_sblock = 0;
5945 		}
5946 		if (control->length == 0) {
5947 			/* still nothing here */
5948 			if (control->end_added == 1) {
5949 			/* he aborted, or is done, i.e. did a shutdown */
5950 				out_flags |= MSG_EOR;
5951 				if (control->pdapi_aborted) {
5952 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5953 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5954 
5955 					out_flags |= MSG_TRUNC;
5956 				} else {
5957 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5958 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5959 				}
5960 				goto done_with_control;
5961 			}
5962 			if (so->so_rcv.sb_cc > held_length) {
5963 				control->held_length = so->so_rcv.sb_cc;
5964 				held_length = 0;
5965 			}
5966 			goto wait_some_more;
5967 		} else if (control->data == NULL) {
5968 			/*
5969 			 * we must re-sync since data is probably being
5970 			 * added
5971 			 */
5972 			SCTP_INP_READ_LOCK(inp);
5973 			if ((control->length > 0) && (control->data == NULL)) {
5974 				/*
5975 				 * big trouble.. we have the lock and it's
5976 				 * corrupt?
5977 				 */
5978 #ifdef INVARIANTS
5979 				panic("Impossible data==NULL length !=0");
5980 #endif
5981 				out_flags |= MSG_EOR;
5982 				out_flags |= MSG_TRUNC;
5983 				control->length = 0;
5984 				SCTP_INP_READ_UNLOCK(inp);
5985 				goto done_with_control;
5986 			}
5987 			SCTP_INP_READ_UNLOCK(inp);
5988 			/* We will fall around to get more data */
5989 		}
5990 		goto get_more_data;
5991 	} else {
5992 		/*-
5993 		 * Give caller back the mbuf chain,
5994 		 * store in uio_resid the length
5995 		 */
5996 		wakeup_read_socket = 0;
5997 		if ((control->end_added == 0) ||
5998 		    (TAILQ_NEXT(control, next) == NULL)) {
5999 			/* Need to get rlock */
6000 			if (hold_rlock == 0) {
6001 				SCTP_INP_READ_LOCK(inp);
6002 				hold_rlock = 1;
6003 			}
6004 		}
6005 		if (control->end_added) {
6006 			out_flags |= MSG_EOR;
6007 			if ((control->do_not_ref_stcb == 0) &&
6008 			    (control->stcb != NULL) &&
6009 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6010 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6011 		}
6012 		if (control->spec_flags & M_NOTIFICATION) {
6013 			out_flags |= MSG_NOTIFICATION;
6014 		}
6015 		uio->uio_resid = control->length;
6016 		*mp = control->data;
6017 		m = control->data;
6018 		while (m) {
6019 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6020 				sctp_sblog(&so->so_rcv,
6021 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6022 			}
6023 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6024 			freed_so_far += SCTP_BUF_LEN(m);
6025 			freed_so_far += MSIZE;
6026 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6027 				sctp_sblog(&so->so_rcv,
6028 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6029 			}
6030 			m = SCTP_BUF_NEXT(m);
6031 		}
6032 		control->data = control->tail_mbuf = NULL;
6033 		control->length = 0;
6034 		if (out_flags & MSG_EOR) {
6035 			/* Done with this control */
6036 			goto done_with_control;
6037 		}
6038 	}
6039 release:
6040 	if (hold_rlock == 1) {
6041 		SCTP_INP_READ_UNLOCK(inp);
6042 		hold_rlock = 0;
6043 	}
6044 	if (hold_sblock == 1) {
6045 		SOCKBUF_UNLOCK(&so->so_rcv);
6046 		hold_sblock = 0;
6047 	}
6048 	sbunlock(&so->so_rcv);
6049 	sockbuf_lock = 0;
6050 
6051 release_unlocked:
6052 	if (hold_sblock) {
6053 		SOCKBUF_UNLOCK(&so->so_rcv);
6054 		hold_sblock = 0;
6055 	}
6056 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6057 		if ((freed_so_far >= rwnd_req) &&
6058 		    (control && (control->do_not_ref_stcb == 0)) &&
6059 		    (no_rcv_needed == 0))
6060 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6061 	}
6062 out:
6063 	if (msg_flags) {
6064 		*msg_flags = out_flags;
6065 	}
6066 	if (((out_flags & MSG_EOR) == 0) &&
6067 	    ((in_flags & MSG_PEEK) == 0) &&
6068 	    (sinfo) &&
6069 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6070 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6071 		struct sctp_extrcvinfo *s_extra;
6072 
6073 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6074 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6075 	}
6076 	if (hold_rlock == 1) {
6077 		SCTP_INP_READ_UNLOCK(inp);
6078 	}
6079 	if (hold_sblock) {
6080 		SOCKBUF_UNLOCK(&so->so_rcv);
6081 	}
6082 	if (sockbuf_lock) {
6083 		sbunlock(&so->so_rcv);
6084 	}
6085 	if (freecnt_applied) {
6086 		/*
6087 		 * The lock on the socket buffer protects us so the free
6088 		 * code will stop. But since we used the socketbuf lock and
6089 		 * the sender uses the tcb_lock to increment, we need to use
6090 		 * the atomic add to the refcnt.
6091 		 */
6092 		if (stcb == NULL) {
6093 #ifdef INVARIANTS
6094 			panic("stcb for refcnt has gone NULL?");
6095 			goto stage_left;
6096 #else
6097 			goto stage_left;
6098 #endif
6099 		}
6100 		atomic_add_int(&stcb->asoc.refcnt, -1);
6101 		/* Save the value back for next time */
6102 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6103 	}
6104 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6105 		if (stcb) {
6106 			sctp_misc_ints(SCTP_SORECV_DONE,
6107 			    freed_so_far,
6108 			    (uint32_t) ((uio) ? (slen - uio->uio_resid) : slen),
6109 			    stcb->asoc.my_rwnd,
6110 			    so->so_rcv.sb_cc);
6111 		} else {
6112 			sctp_misc_ints(SCTP_SORECV_DONE,
6113 			    freed_so_far,
6114 			    (uint32_t) ((uio) ? (slen - uio->uio_resid) : slen),
6115 			    0,
6116 			    so->so_rcv.sb_cc);
6117 		}
6118 	}
6119 stage_left:
6120 	if (wakeup_read_socket) {
6121 		sctp_sorwakeup(inp, so);
6122 	}
6123 	return (error);
6124 }
6125 
6126 
6127 #ifdef SCTP_MBUF_LOGGING
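/*
 * When mbuf logging is compiled in, these wrappers log each mbuf free
 * (if the mbuf logging level is enabled) before handing the mbuf back
 * to the system.
 */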
6128 struct mbuf *
6129 sctp_m_free(struct mbuf *m)
6130 {
6131 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6132 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6133 	}
6134 	return (m_free(m));
6135 }
6136 
6137 void
6138 sctp_m_freem(struct mbuf *mb)
6139 {
6140 	while (mb != NULL)
6141 		mb = sctp_m_free(mb);
6142 }
6143 
6144 #endif
6145 
6146 int
6147 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6148 {
6149 	/*
6150 	 * Given a local address, request a peer-set-primary for all
6151 	 * associations that hold the address.
6152 	 */
6153 	struct sctp_ifa *ifa;
6154 	struct sctp_laddr *wi;
6155 
6156 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6157 	if (ifa == NULL) {
6158 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6159 		return (EADDRNOTAVAIL);
6160 	}
6161 	/*
6162 	 * Now that we have the ifa we must awaken the iterator with this
6163 	 * message.
6164 	 */
6165 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6166 	if (wi == NULL) {
6167 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6168 		return (ENOMEM);
6169 	}
6170 	/* Now incr the count and init the wi structure */
6171 	SCTP_INCR_LADDR_COUNT();
6172 	bzero(wi, sizeof(*wi));
6173 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6174 	wi->ifa = ifa;
6175 	wi->action = SCTP_SET_PRIM_ADDR;
6176 	atomic_add_int(&ifa->refcount, 1);
6177 
6178 	/* Now add it to the work queue */
6179 	SCTP_WQ_ADDR_LOCK();
6180 	/*
6181 	 * Should this really be a tailq? As it is we will process the
6182 	 * newest first :-0
6183 	 */
6184 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6185 	SCTP_WQ_ADDR_UNLOCK();
6186 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6187 	    (struct sctp_inpcb *)NULL,
6188 	    (struct sctp_tcb *)NULL,
6189 	    (struct sctp_nets *)NULL);
6190 	return (0);
6191 }
6192 
6193 
6194 int
6195 sctp_soreceive(struct socket *so,
6196     struct sockaddr **psa,
6197     struct uio *uio,
6198     struct mbuf **mp0,
6199     struct mbuf **controlp,
6200     int *flagsp)
6201 {
6202 	int error, fromlen;
6203 	uint8_t sockbuf[256];
6204 	struct sockaddr *from;
6205 	struct sctp_extrcvinfo sinfo;
6206 	int filling_sinfo = 1;
6207 	struct sctp_inpcb *inp;
6208 
6209 	inp = (struct sctp_inpcb *)so->so_pcb;
6210 	/* pickup the assoc we are reading from */
6211 	if (inp == NULL) {
6212 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6213 		return (EINVAL);
6214 	}
6215 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6216 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6217 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6218 	    (controlp == NULL)) {
6219 		/* user does not want the sndrcv ctl */
6220 		filling_sinfo = 0;
6221 	}
6222 	if (psa) {
6223 		from = (struct sockaddr *)sockbuf;
6224 		fromlen = sizeof(sockbuf);
6225 		from->sa_len = 0;
6226 	} else {
6227 		from = NULL;
6228 		fromlen = 0;
6229 	}
6230 
6231 	if (filling_sinfo) {
6232 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6233 	}
6234 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6235 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6236 	if (controlp != NULL) {
6237 		/* copy back the sinfo in a CMSG format */
6238 		if (filling_sinfo)
6239 			*controlp = sctp_build_ctl_nchunk(inp,
6240 			    (struct sctp_sndrcvinfo *)&sinfo);
6241 		else
6242 			*controlp = NULL;
6243 	}
6244 	if (psa) {
6245 		/* copy back the address info */
6246 		if (from && from->sa_len) {
6247 			*psa = sodupsockaddr(from, M_NOWAIT);
6248 		} else {
6249 			*psa = NULL;
6250 		}
6251 	}
6252 	return (error);
6253 }
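/*
 * For reference, a minimal userland sketch of how this entry point is
 * typically exercised (illustrative only; it assumes a connected
 * one-to-one style SCTP socket in sd and the libc sctp_recvmsg() wrapper):
 *
 *	struct sctp_sndrcvinfo sinfo;
 *	struct sockaddr_storage from;
 *	socklen_t fromlen = sizeof(from);
 *	char buf[2048];
 *	int flags = 0;
 *	ssize_t n;
 *
 *	n = sctp_recvmsg(sd, buf, sizeof(buf), (struct sockaddr *)&from,
 *	    &fromlen, &sinfo, &flags);
 *	if (n > 0 && (flags & MSG_NOTIFICATION)) {
 *		// buf holds a union sctp_notification, not user data
 *	} else if (n > 0 && (flags & MSG_EOR) == 0) {
 *		// partial delivery: more of this message follows
 *	}
 *
 * recvmsg()/sctp_recvmsg() on an SCTP socket ends up in sctp_soreceive()
 * and then sctp_sorecvmsg() above; sinfo is only filled in when the
 * matching recv-info socket options are enabled.
 */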
6254 
6255 
6256 
6257 
6258 
6259 int
6260 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6261     int totaddr, int *error)
6262 {
6263 	int added = 0;
6264 	int i;
6265 	struct sctp_inpcb *inp;
6266 	struct sockaddr *sa;
6267 	size_t incr = 0;
6268 
6269 #ifdef INET
6270 	struct sockaddr_in *sin;
6271 
6272 #endif
6273 #ifdef INET6
6274 	struct sockaddr_in6 *sin6;
6275 
6276 #endif
6277 
6278 	sa = addr;
6279 	inp = stcb->sctp_ep;
6280 	*error = 0;
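	/*
	 * Walk the packed array of sockaddrs and add each one to the
	 * association as a confirmed remote address; any failure frees the
	 * association and we bail out.
	 */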
6281 	for (i = 0; i < totaddr; i++) {
6282 		switch (sa->sa_family) {
6283 #ifdef INET
6284 		case AF_INET:
6285 			incr = sizeof(struct sockaddr_in);
6286 			sin = (struct sockaddr_in *)sa;
6287 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6288 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6289 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6290 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6291 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6292 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6293 				*error = EINVAL;
6294 				goto out_now;
6295 			}
6296 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6297 			    SCTP_DONOT_SETSCOPE,
6298 			    SCTP_ADDR_IS_CONFIRMED)) {
6299 				/* assoc gone, no unlock needed */
6300 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6301 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6302 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6303 				*error = ENOBUFS;
6304 				goto out_now;
6305 			}
6306 			added++;
6307 			break;
6308 #endif
6309 #ifdef INET6
6310 		case AF_INET6:
6311 			incr = sizeof(struct sockaddr_in6);
6312 			sin6 = (struct sockaddr_in6 *)sa;
6313 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6314 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6315 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6316 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6317 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6318 				*error = EINVAL;
6319 				goto out_now;
6320 			}
6321 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6322 			    SCTP_DONOT_SETSCOPE,
6323 			    SCTP_ADDR_IS_CONFIRMED)) {
6324 				/* assoc gone, no unlock needed */
6325 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6326 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6327 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6328 				*error = ENOBUFS;
6329 				goto out_now;
6330 			}
6331 			added++;
6332 			break;
6333 #endif
6334 		default:
6335 			break;
6336 		}
6337 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6338 	}
6339 out_now:
6340 	return (added);
6341 }
6342 
6343 struct sctp_tcb *
6344 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6345     unsigned int *totaddr,
6346     unsigned int *num_v4, unsigned int *num_v6, int *error,
6347     unsigned int limit, int *bad_addr)
6348 {
6349 	struct sockaddr *sa;
6350 	struct sctp_tcb *stcb = NULL;
6351 	unsigned int incr, at, i;
6352 
6353 	at = incr = 0;
6354 	sa = addr;
6355 	*error = *num_v6 = *num_v4 = 0;
6356 	/* account and validate addresses */
6357 	for (i = 0; i < *totaddr; i++) {
6358 		switch (sa->sa_family) {
6359 #ifdef INET
6360 		case AF_INET:
6361 			if (sa->sa_len != incr) {
6362 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6363 				*error = EINVAL;
6364 				*bad_addr = 1;
6365 				return (NULL);
6366 			}
6367 			(*num_v4) += 1;
6368 			incr = (unsigned int)sizeof(struct sockaddr_in);
6369 			break;
6370 #endif
6371 #ifdef INET6
6372 		case AF_INET6:
6373 			{
6374 				struct sockaddr_in6 *sin6;
6375 
6376 				sin6 = (struct sockaddr_in6 *)sa;
6377 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6378 					/* Must be non-mapped for connectx */
6379 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6380 					*error = EINVAL;
6381 					*bad_addr = 1;
6382 					return (NULL);
6383 				}
6384 				if (sa->sa_len != incr) {
6385 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6386 					*error = EINVAL;
6387 					*bad_addr = 1;
6388 					return (NULL);
6389 				}
6390 				(*num_v6) += 1;
6391 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6392 				break;
6393 			}
6394 #endif
6395 		default:
6396 			*totaddr = i;
6397 			/* we are done */
6398 			break;
6399 		}
6400 		if (i == *totaddr) {
6401 			break;
6402 		}
6403 		SCTP_INP_INCR_REF(inp);
6404 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6405 		if (stcb != NULL) {
6406 			/* Already have or am bringing up an association */
6407 			return (stcb);
6408 		} else {
6409 			SCTP_INP_DECR_REF(inp);
6410 		}
6411 		if ((at + incr) > limit) {
6412 			*totaddr = i;
6413 			break;
6414 		}
6415 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6416 	}
6417 	return ((struct sctp_tcb *)NULL);
6418 }
6419 
6420 /*
6421  * sctp_bindx(ADD) for one address.
6422  * assumes all arguments are valid/checked by caller.
6423  */
6424 void
6425 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6426     struct sockaddr *sa, sctp_assoc_t assoc_id,
6427     uint32_t vrf_id, int *error, void *p)
6428 {
6429 	struct sockaddr *addr_touse;
6430 
6431 #if defined(INET) && defined(INET6)
6432 	struct sockaddr_in sin;
6433 
6434 #endif
6435 
6436 	/* see if we're bound all already! */
6437 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6438 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6439 		*error = EINVAL;
6440 		return;
6441 	}
6442 	addr_touse = sa;
6443 #ifdef INET6
6444 	if (sa->sa_family == AF_INET6) {
6445 #ifdef INET
6446 		struct sockaddr_in6 *sin6;
6447 
6448 #endif
6449 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6450 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6451 			*error = EINVAL;
6452 			return;
6453 		}
6454 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6455 			/* can only bind v6 on PF_INET6 sockets */
6456 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6457 			*error = EINVAL;
6458 			return;
6459 		}
6460 #ifdef INET
6461 		sin6 = (struct sockaddr_in6 *)addr_touse;
6462 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6463 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6464 			    SCTP_IPV6_V6ONLY(inp)) {
6465 				/* can't bind v4-mapped on PF_INET sockets */
6466 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6467 				*error = EINVAL;
6468 				return;
6469 			}
6470 			in6_sin6_2_sin(&sin, sin6);
6471 			addr_touse = (struct sockaddr *)&sin;
6472 		}
6473 #endif
6474 	}
6475 #endif
6476 #ifdef INET
6477 	if (sa->sa_family == AF_INET) {
6478 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6479 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6480 			*error = EINVAL;
6481 			return;
6482 		}
6483 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6484 		    SCTP_IPV6_V6ONLY(inp)) {
6485 			/* can't bind v4 on PF_INET sockets */
6486 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6487 			*error = EINVAL;
6488 			return;
6489 		}
6490 	}
6491 #endif
6492 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6493 		if (p == NULL) {
6494 			/* Can't get proc for Net/Open BSD */
6495 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6496 			*error = EINVAL;
6497 			return;
6498 		}
6499 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6500 		return;
6501 	}
6502 	/*
6503 	 * No locks required here since bind and mgmt_ep_sa all do their own
6504 	 * locking. If we do something for the FIX: below we may need to
6505 	 * lock in that case.
6506 	 */
6507 	if (assoc_id == 0) {
6508 		/* add the address */
6509 		struct sctp_inpcb *lep;
6510 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6511 
6512 		/* validate the incoming port */
6513 		if ((lsin->sin_port != 0) &&
6514 		    (lsin->sin_port != inp->sctp_lport)) {
6515 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6516 			*error = EINVAL;
6517 			return;
6518 		} else {
6519 			/* user specified 0 port, set it to existing port */
6520 			lsin->sin_port = inp->sctp_lport;
6521 		}
6522 
6523 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6524 		if (lep != NULL) {
6525 			/*
6526 			 * We must decrement the refcount since we have the
6527 			 * ep already and are binding. No remove going on
6528 			 * here.
6529 			 */
6530 			SCTP_INP_DECR_REF(lep);
6531 		}
6532 		if (lep == inp) {
6533 			/* already bound to it.. ok */
6534 			return;
6535 		} else if (lep == NULL) {
6536 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6537 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6538 			    SCTP_ADD_IP_ADDRESS,
6539 			    vrf_id, NULL);
6540 		} else {
6541 			*error = EADDRINUSE;
6542 		}
6543 		if (*error)
6544 			return;
6545 	} else {
6546 		/*
6547 		 * FIX: decide whether we allow assoc based bindx
6548 		 */
6549 	}
6550 }
6551 
6552 /*
6553  * sctp_bindx(DELETE) for one address.
6554  * assumes all arguments are valid/checked by caller.
6555  */
6556 void
6557 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6558     struct sockaddr *sa, sctp_assoc_t assoc_id,
6559     uint32_t vrf_id, int *error)
6560 {
6561 	struct sockaddr *addr_touse;
6562 
6563 #if defined(INET) && defined(INET6)
6564 	struct sockaddr_in sin;
6565 
6566 #endif
6567 
6568 	/* see if we're bound all already! */
6569 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6570 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6571 		*error = EINVAL;
6572 		return;
6573 	}
6574 	addr_touse = sa;
6575 #ifdef INET6
6576 	if (sa->sa_family == AF_INET6) {
6577 #ifdef INET
6578 		struct sockaddr_in6 *sin6;
6579 
6580 #endif
6581 
6582 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6583 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6584 			*error = EINVAL;
6585 			return;
6586 		}
6587 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6588 			/* can only bind v6 on PF_INET6 sockets */
6589 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6590 			*error = EINVAL;
6591 			return;
6592 		}
6593 #ifdef INET
6594 		sin6 = (struct sockaddr_in6 *)addr_touse;
6595 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6596 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6597 			    SCTP_IPV6_V6ONLY(inp)) {
6598 				/* can't bind mapped-v4 on PF_INET sockets */
6599 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6600 				*error = EINVAL;
6601 				return;
6602 			}
6603 			in6_sin6_2_sin(&sin, sin6);
6604 			addr_touse = (struct sockaddr *)&sin;
6605 		}
6606 #endif
6607 	}
6608 #endif
6609 #ifdef INET
6610 	if (sa->sa_family == AF_INET) {
6611 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6612 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6613 			*error = EINVAL;
6614 			return;
6615 		}
6616 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6617 		    SCTP_IPV6_V6ONLY(inp)) {
6618 			/* can't bind v4 on PF_INET sockets */
6619 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6620 			*error = EINVAL;
6621 			return;
6622 		}
6623 	}
6624 #endif
6625 	/*
6626 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6627 	 * below is ever changed we may need to lock before calling
6628 	 * association level binding.
6629 	 */
6630 	if (assoc_id == 0) {
6631 		/* delete the address */
6632 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6633 		    SCTP_DEL_IP_ADDRESS,
6634 		    vrf_id, NULL);
6635 	} else {
6636 		/*
6637 		 * FIX: decide whether we allow assoc based bindx
6638 		 */
6639 	}
6640 }
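/*
 * The two sctp_bindx_*_address() helpers above back the userland
 * sctp_bindx() interface; a rough sketch of the caller's side
 * (illustrative only; sd is an SCTP socket already bound to a port and
 * extra holds the additional local address):
 *
 *	struct sockaddr_in extra;
 *	... fill in extra ...
 *	sctp_bindx(sd, (struct sockaddr *)&extra, 1, SCTP_BINDX_ADD_ADDR);
 *	...
 *	sctp_bindx(sd, (struct sockaddr *)&extra, 1, SCTP_BINDX_REM_ADDR);
 *
 * Each address in the array is expected to reach here one at a time via
 * the SCTP_BINDX_ADD_ADDR / SCTP_BINDX_REM_ADDR handling in the socket
 * option code.
 */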
6641 
6642 /*
6643  * returns the valid local address count for an assoc, taking into account
6644  * all scoping rules
6645  */
6646 int
6647 sctp_local_addr_count(struct sctp_tcb *stcb)
6648 {
6649 	int loopback_scope;
6650 
6651 #if defined(INET)
6652 	int ipv4_local_scope, ipv4_addr_legal;
6653 
6654 #endif
6655 #if defined (INET6)
6656 	int local_scope, site_scope, ipv6_addr_legal;
6657 
6658 #endif
6659 	struct sctp_vrf *vrf;
6660 	struct sctp_ifn *sctp_ifn;
6661 	struct sctp_ifa *sctp_ifa;
6662 	int count = 0;
6663 
6664 	/* Turn on all the appropriate scopes */
6665 	loopback_scope = stcb->asoc.scope.loopback_scope;
6666 #if defined(INET)
6667 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6668 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6669 #endif
6670 #if defined(INET6)
6671 	local_scope = stcb->asoc.scope.local_scope;
6672 	site_scope = stcb->asoc.scope.site_scope;
6673 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6674 #endif
6675 	SCTP_IPI_ADDR_RLOCK();
6676 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6677 	if (vrf == NULL) {
6678 		/* no vrf, no addresses */
6679 		SCTP_IPI_ADDR_RUNLOCK();
6680 		return (0);
6681 	}
6682 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6683 		/*
6684 		 * bound all case: go through all ifns on the vrf
6685 		 */
6686 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6687 			if ((loopback_scope == 0) &&
6688 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6689 				continue;
6690 			}
6691 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6692 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6693 					continue;
6694 				switch (sctp_ifa->address.sa.sa_family) {
6695 #ifdef INET
6696 				case AF_INET:
6697 					if (ipv4_addr_legal) {
6698 						struct sockaddr_in *sin;
6699 
6700 						sin = &sctp_ifa->address.sin;
6701 						if (sin->sin_addr.s_addr == 0) {
6702 							/*
6703 							 * skip unspecified
6704 							 * addrs
6705 							 */
6706 							continue;
6707 						}
6708 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6709 						    &sin->sin_addr) != 0) {
6710 							continue;
6711 						}
6712 						if ((ipv4_local_scope == 0) &&
6713 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6714 							continue;
6715 						}
6716 						/* count this one */
6717 						count++;
6718 					} else {
6719 						continue;
6720 					}
6721 					break;
6722 #endif
6723 #ifdef INET6
6724 				case AF_INET6:
6725 					if (ipv6_addr_legal) {
6726 						struct sockaddr_in6 *sin6;
6727 
6728 						sin6 = &sctp_ifa->address.sin6;
6729 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6730 							continue;
6731 						}
6732 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6733 						    &sin6->sin6_addr) != 0) {
6734 							continue;
6735 						}
6736 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6737 							if (local_scope == 0)
6738 								continue;
6739 							if (sin6->sin6_scope_id == 0) {
6740 								if (sa6_recoverscope(sin6) != 0)
6741 									/* bad link local address */
6755 									continue;
6756 							}
6757 						}
6758 						if ((site_scope == 0) &&
6759 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6760 							continue;
6761 						}
6762 						/* count this one */
6763 						count++;
6764 					}
6765 					break;
6766 #endif
6767 				default:
6768 					/* TSNH */
6769 					break;
6770 				}
6771 			}
6772 		}
6773 	} else {
6774 		/*
6775 		 * subset bound case
6776 		 */
6777 		struct sctp_laddr *laddr;
6778 
6779 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6780 		    sctp_nxt_addr) {
6781 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6782 				continue;
6783 			}
6784 			/* count this one */
6785 			count++;
6786 		}
6787 	}
6788 	SCTP_IPI_ADDR_RUNLOCK();
6789 	return (count);
6790 }
6791 
6792 #if defined(SCTP_LOCAL_TRACE_BUF)
6793 
6794 void
6795 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6796 {
6797 	uint32_t saveindex, newindex;
6798 
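	/*
	 * Reserve the next slot in the circular trace buffer without a
	 * lock: loop on a compare-and-set of the shared index until we win.
	 */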
6799 	do {
6800 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6801 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6802 			newindex = 1;
6803 		} else {
6804 			newindex = saveindex + 1;
6805 		}
6806 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6807 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6808 		saveindex = 0;
6809 	}
6810 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6811 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6812 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6813 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6814 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6815 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6816 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6817 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6818 }
6819 
6820 #endif
6821 static void
6822 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6823     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6824 {
6825 	struct ip *iph;
6826 
6827 #ifdef INET6
6828 	struct ip6_hdr *ip6;
6829 
6830 #endif
6831 	struct mbuf *sp, *last;
6832 	struct udphdr *uhdr;
6833 	uint16_t port;
6834 
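	/*
	 * This is the callback for SCTP packets arriving over the UDP
	 * tunneling socket: strip the UDP header and feed the inner SCTP
	 * packet into the normal input path, remembering the peer's UDP
	 * source port.
	 */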
6835 	if ((m->m_flags & M_PKTHDR) == 0) {
6836 		/* Can't handle one that is not a pkt hdr */
6837 		goto out;
6838 	}
6839 	/* Pull the src port */
6840 	iph = mtod(m, struct ip *);
6841 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6842 	port = uhdr->uh_sport;
6843 	/*
6844 	 * Split out the mbuf chain. Leave the IP header in m, place the
6845 	 * rest in the sp.
6846 	 */
6847 	sp = m_split(m, off, M_NOWAIT);
6848 	if (sp == NULL) {
6849 		/* Gak, drop packet, we can't do a split */
6850 		goto out;
6851 	}
6852 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6853 		/* Gak, packet can't have an SCTP header in it - too small */
6854 		m_freem(sp);
6855 		goto out;
6856 	}
6857 	/* Now pull up the UDP header and SCTP header together */
6858 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6859 	if (sp == NULL) {
6860 		/* Gak pullup failed */
6861 		goto out;
6862 	}
6863 	/* Trim out the UDP header */
6864 	m_adj(sp, sizeof(struct udphdr));
6865 
6866 	/* Now reconstruct the mbuf chain */
6867 	for (last = m; last->m_next; last = last->m_next);
6868 	last->m_next = sp;
6869 	m->m_pkthdr.len += sp->m_pkthdr.len;
6870 	/*
6871 	 * The CSUM_DATA_VALID flag indicates that the HW checked the UDP
6872 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
6873 	 * CSUM_SCTP_VALID this would imply that the HW also verified the
6874 	 * SCTP checksum. Therefore, clear the bit.
6875 	 */
6876 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6877 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6878 	    m->m_pkthdr.len,
6879 	    if_name(m->m_pkthdr.rcvif),
6880 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6881 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
6882 	iph = mtod(m, struct ip *);
6883 	switch (iph->ip_v) {
6884 #ifdef INET
6885 	case IPVERSION:
6886 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6887 		sctp_input_with_port(m, off, port);
6888 		break;
6889 #endif
6890 #ifdef INET6
6891 	case IPV6_VERSION >> 4:
6892 		ip6 = mtod(m, struct ip6_hdr *);
6893 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6894 		sctp6_input_with_port(&m, &off, port);
6895 		break;
6896 #endif
6897 	default:
6898 		goto out;
6899 		break;
6900 	}
6901 	return;
6902 out:
6903 	m_freem(m);
6904 }
6905 
6906 #ifdef INET
6907 static void
6908 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
6909 {
6910 	struct ip *outer_ip, *inner_ip;
6911 	struct sctphdr *sh;
6912 	struct icmp *icmp;
6913 	struct udphdr *udp;
6914 	struct sctp_inpcb *inp;
6915 	struct sctp_tcb *stcb;
6916 	struct sctp_nets *net;
6917 	struct sctp_init_chunk *ch;
6918 	struct sockaddr_in src, dst;
6919 	uint8_t type, code;
6920 
6921 	inner_ip = (struct ip *)vip;
6922 	icmp = (struct icmp *)((caddr_t)inner_ip -
6923 	    (sizeof(struct icmp) - sizeof(struct ip)));
6924 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
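	/*
	 * Make sure the quoted packet in the ICMP message is long enough to
	 * contain the inner IP header, the UDP header, and the first 8
	 * bytes of the SCTP common header (ports and verification tag).
	 */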
6925 	if (ntohs(outer_ip->ip_len) <
6926 	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
6927 		return;
6928 	}
6929 	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
6930 	sh = (struct sctphdr *)(udp + 1);
6931 	memset(&src, 0, sizeof(struct sockaddr_in));
6932 	src.sin_family = AF_INET;
6933 	src.sin_len = sizeof(struct sockaddr_in);
6934 	src.sin_port = sh->src_port;
6935 	src.sin_addr = inner_ip->ip_src;
6936 	memset(&dst, 0, sizeof(struct sockaddr_in));
6937 	dst.sin_family = AF_INET;
6938 	dst.sin_len = sizeof(struct sockaddr_in);
6939 	dst.sin_port = sh->dest_port;
6940 	dst.sin_addr = inner_ip->ip_dst;
6941 	/*
6942 	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
6943 	 * holds our local endpoint address. Thus we reverse the dst and the
6944 	 * src in the lookup.
6945 	 */
6946 	inp = NULL;
6947 	net = NULL;
6948 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
6949 	    (struct sockaddr *)&src,
6950 	    &inp, &net, 1,
6951 	    SCTP_DEFAULT_VRFID);
6952 	if ((stcb != NULL) &&
6953 	    (net != NULL) &&
6954 	    (inp != NULL)) {
6955 		/* Check the UDP port numbers */
6956 		if ((udp->uh_dport != net->port) ||
6957 		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
6958 			SCTP_TCB_UNLOCK(stcb);
6959 			return;
6960 		}
6961 		/* Check the verification tag */
6962 		if (ntohl(sh->v_tag) != 0) {
6963 			/*
6964 			 * This must be the verification tag used for
6965 			 * sending out packets. We don't consider packets
6966 			 * reflecting the verification tag.
6967 			 */
6968 			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
6969 				SCTP_TCB_UNLOCK(stcb);
6970 				return;
6971 			}
6972 		} else {
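			/*
			 * A zero verification tag is only used on packets
			 * carrying an INIT chunk.  Verifying the initiate
			 * tag requires 20 bytes of quoted SCTP data: the
			 * common header (12), the chunk header (4) and the
			 * initiate tag (4).
			 */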
6973 			if (ntohs(outer_ip->ip_len) >=
6974 			    sizeof(struct ip) +
6975 			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
6976 				/*
6977 				 * In this case we can check if we got an
6978 				 * INIT chunk and if the initiate tag
6979 				 * matches.
6980 				 */
6981 				ch = (struct sctp_init_chunk *)(sh + 1);
6982 				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
6983 				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
6984 					SCTP_TCB_UNLOCK(stcb);
6985 					return;
6986 				}
6987 			} else {
6988 				SCTP_TCB_UNLOCK(stcb);
6989 				return;
6990 			}
6991 		}
6992 		type = icmp->icmp_type;
6993 		code = icmp->icmp_code;
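		/*
		 * A port unreachable for the encapsulating UDP port means
		 * the peer is not listening for SCTP over UDP; report it to
		 * sctp_notify() as a protocol unreachable.
		 */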
6994 		if ((type == ICMP_UNREACH) &&
6995 		    (code == ICMP_UNREACH_PORT)) {
6996 			code = ICMP_UNREACH_PROTOCOL;
6997 		}
6998 		sctp_notify(inp, stcb, net, type, code,
6999 		    ntohs(inner_ip->ip_len),
7000 		    ntohs(icmp->icmp_nextmtu));
7001 	} else {
7002 		if ((stcb == NULL) && (inp != NULL)) {
7003 			/* reduce ref-count */
7004 			SCTP_INP_WLOCK(inp);
7005 			SCTP_INP_DECR_REF(inp);
7006 			SCTP_INP_WUNLOCK(inp);
7007 		}
7008 		if (stcb) {
7009 			SCTP_TCB_UNLOCK(stcb);
7010 		}
7011 	}
7012 	return;
7013 }
7014 
7015 #endif
7016 
7017 #ifdef INET6
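/*
 * ICMPv6 callback for the SCTP over UDP tunneling socket, registered via
 * udp_set_kernel_tunneling() in sctp_over_udp_start().  It maps ICMPv6
 * errors for encapsulating UDP packets to SCTP notifications.
 */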
7018 static void
7019 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
7020 {
7021 	struct ip6ctlparam *ip6cp;
7022 	struct sctp_inpcb *inp;
7023 	struct sctp_tcb *stcb;
7024 	struct sctp_nets *net;
7025 	struct sctphdr sh;
7026 	struct udphdr udp;
7027 	struct sockaddr_in6 src, dst;
7028 	uint8_t type, code;
7029 
7030 	ip6cp = (struct ip6ctlparam *)d;
7031 	/*
7032 	 * XXX: We assume that when ip6c_ip6 is non-NULL, ip6c_m and ip6c_off are valid.
7033 	 */
7034 	if (ip6cp->ip6c_m == NULL) {
7035 		return;
7036 	}
7037 	/*
7038 	 * Check if we can safely examine the ports and the verification tag
7039 	 * of the SCTP common header.
7040 	 */
7041 	if (ip6cp->ip6c_m->m_pkthdr.len <
7042 	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
7043 		return;
7044 	}
7045 	/* Copy out the UDP header. */
7046 	memset(&udp, 0, sizeof(struct udphdr));
7047 	m_copydata(ip6cp->ip6c_m,
7048 	    ip6cp->ip6c_off,
7049 	    sizeof(struct udphdr),
7050 	    (caddr_t)&udp);
7051 	/* Copy out the port numbers and the verification tag. */
7052 	memset(&sh, 0, sizeof(struct sctphdr));
7053 	m_copydata(ip6cp->ip6c_m,
7054 	    ip6cp->ip6c_off + sizeof(struct udphdr),
7055 	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
7056 	    (caddr_t)&sh);
7057 	memset(&src, 0, sizeof(struct sockaddr_in6));
7058 	src.sin6_family = AF_INET6;
7059 	src.sin6_len = sizeof(struct sockaddr_in6);
7060 	src.sin6_port = sh.src_port;
7061 	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
7062 	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7063 		return;
7064 	}
7065 	memset(&dst, 0, sizeof(struct sockaddr_in6));
7066 	dst.sin6_family = AF_INET6;
7067 	dst.sin6_len = sizeof(struct sockaddr_in6);
7068 	dst.sin6_port = sh.dest_port;
7069 	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7070 	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7071 		return;
7072 	}
7073 	inp = NULL;
7074 	net = NULL;
7075 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7076 	    (struct sockaddr *)&src,
7077 	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7078 	if ((stcb != NULL) &&
7079 	    (net != NULL) &&
7080 	    (inp != NULL)) {
7081 		/* Check the UDP port numbers */
7082 		if ((udp.uh_dport != net->port) ||
7083 		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7084 			SCTP_TCB_UNLOCK(stcb);
7085 			return;
7086 		}
7087 		/* Check the verification tag */
7088 		if (ntohl(sh.v_tag) != 0) {
7089 			/*
7090 			 * This must be the verification tag used for
7091 			 * sending out packets. We don't consider packets
7092 			 * reflecting the verification tag.
7093 			 */
7094 			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7095 				SCTP_TCB_UNLOCK(stcb);
7096 				return;
7097 			}
7098 		} else {
7099 			if (ip6cp->ip6c_m->m_pkthdr.len >=
7100 			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7101 			    sizeof(struct sctphdr) +
7102 			    sizeof(struct sctp_chunkhdr) +
7103 			    offsetof(struct sctp_init, a_rwnd)) {
7104 				/*
7105 				 * In this case we can check if we got an
7106 				 * INIT chunk and if the initiate tag
7107 				 * matches.
7108 				 */
7109 				uint32_t initiate_tag;
7110 				uint8_t chunk_type;
7111 
7112 				m_copydata(ip6cp->ip6c_m,
7113 				    ip6cp->ip6c_off +
7114 				    sizeof(struct udphdr) +
7115 				    sizeof(struct sctphdr),
7116 				    sizeof(uint8_t),
7117 				    (caddr_t)&chunk_type);
7118 				m_copydata(ip6cp->ip6c_m,
7119 				    ip6cp->ip6c_off +
7120 				    sizeof(struct udphdr) +
7121 				    sizeof(struct sctphdr) +
7122 				    sizeof(struct sctp_chunkhdr),
7123 				    sizeof(uint32_t),
7124 				    (caddr_t)&initiate_tag);
7125 				if ((chunk_type != SCTP_INITIATION) ||
7126 				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7127 					SCTP_TCB_UNLOCK(stcb);
7128 					return;
7129 				}
7130 			} else {
7131 				SCTP_TCB_UNLOCK(stcb);
7132 				return;
7133 			}
7134 		}
7135 		type = ip6cp->ip6c_icmp6->icmp6_type;
7136 		code = ip6cp->ip6c_icmp6->icmp6_code;
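		/*
		 * A port unreachable for the encapsulating UDP port means
		 * the peer is not listening for SCTP over UDP; report it as
		 * a parameter problem (unrecognized next header), the
		 * ICMPv6 analogue of a protocol unreachable.
		 */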
7137 		if ((type == ICMP6_DST_UNREACH) &&
7138 		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7139 			type = ICMP6_PARAM_PROB;
7140 			code = ICMP6_PARAMPROB_NEXTHEADER;
7141 		}
7142 		sctp6_notify(inp, stcb, net, type, code,
7143 		    (uint16_t) ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7144 	} else {
7145 		if ((stcb == NULL) && (inp != NULL)) {
7146 			/* reduce inp's ref-count */
7147 			SCTP_INP_WLOCK(inp);
7148 			SCTP_INP_DECR_REF(inp);
7149 			SCTP_INP_WUNLOCK(inp);
7150 		}
7151 		if (stcb) {
7152 			SCTP_TCB_UNLOCK(stcb);
7153 		}
7154 	}
7155 }
7156 
7157 #endif
7158 
7159 void
7160 sctp_over_udp_stop(void)
7161 {
7162 	/*
7163 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
7164 	 * for writing!
7165 	 */
7166 #ifdef INET
7167 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7168 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7169 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7170 	}
7171 #endif
7172 #ifdef INET6
7173 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7174 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7175 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7176 	}
7177 #endif
7178 }
7179 
7180 int
7181 sctp_over_udp_start(void)
7182 {
7183 	uint16_t port;
7184 	int ret;
7185 
7186 #ifdef INET
7187 	struct sockaddr_in sin;
7189 #endif
7190 #ifdef INET6
7191 	struct sockaddr_in6 sin6;
7193 #endif
7194 	/*
7195 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
7196 	 * for writing!
7197 	 */
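	/*
	 * For each enabled address family: create a UDP socket, register
	 * the tunneling callbacks with it, and bind it to the configured
	 * encapsulation port.
	 */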
7198 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7199 	if (ntohs(port) == 0) {
7200 		/* Must have a port set */
7201 		return (EINVAL);
7202 	}
7203 #ifdef INET
7204 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7205 		/* Already running -- must stop first */
7206 		return (EALREADY);
7207 	}
7208 #endif
7209 #ifdef INET6
7210 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7211 		/* Already running -- must stop first */
7212 		return (EALREADY);
7213 	}
7214 #endif
7215 #ifdef INET
7216 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7217 	    SOCK_DGRAM, IPPROTO_UDP,
7218 	    curthread->td_ucred, curthread))) {
7219 		sctp_over_udp_stop();
7220 		return (ret);
7221 	}
7222 	/* Call the special UDP hook. */
7223 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7224 	    sctp_recv_udp_tunneled_packet,
7225 	    sctp_recv_icmp_tunneled_packet,
7226 	    NULL))) {
7227 		sctp_over_udp_stop();
7228 		return (ret);
7229 	}
7230 	/* Ok, we have a socket, bind it to the port. */
7231 	memset(&sin, 0, sizeof(struct sockaddr_in));
7232 	sin.sin_len = sizeof(struct sockaddr_in);
7233 	sin.sin_family = AF_INET;
7234 	sin.sin_port = htons(port);
7235 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7236 	    (struct sockaddr *)&sin, curthread))) {
7237 		sctp_over_udp_stop();
7238 		return (ret);
7239 	}
7240 #endif
7241 #ifdef INET6
7242 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7243 	    SOCK_DGRAM, IPPROTO_UDP,
7244 	    curthread->td_ucred, curthread))) {
7245 		sctp_over_udp_stop();
7246 		return (ret);
7247 	}
7248 	/* Call the special UDP hook. */
7249 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7250 	    sctp_recv_udp_tunneled_packet,
7251 	    sctp_recv_icmp6_tunneled_packet,
7252 	    NULL))) {
7253 		sctp_over_udp_stop();
7254 		return (ret);
7255 	}
7256 	/* Ok, we have a socket, bind it to the port. */
7257 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7258 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7259 	sin6.sin6_family = AF_INET6;
7260 	sin6.sin6_port = htons(port);
7261 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7262 	    (struct sockaddr *)&sin6, curthread))) {
7263 		sctp_over_udp_stop();
7264 		return (ret);
7265 	}
7266 #endif
7267 	return (0);
7268 }
7269