xref: /freebsd/sys/netinet/sctputil.c (revision 9fc5c47fa5c7fa58d61245f0408611943e613164)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55 
56 
57 #ifndef KTR_SCTP
58 #define KTR_SCTP KTR_SUBSYS
59 #endif
60 
61 extern struct sctp_cc_functions sctp_cc_functions[];
62 extern struct sctp_ss_functions sctp_ss_functions[];
63 
64 void
65 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
66 {
67 	struct sctp_cwnd_log sctp_clog;
68 
69 	sctp_clog.x.sb.stcb = stcb;
70 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
71 	if (stcb)
72 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
73 	else
74 		sctp_clog.x.sb.stcb_sbcc = 0;
75 	sctp_clog.x.sb.incr = incr;
76 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
77 	    SCTP_LOG_EVENT_SB,
78 	    from,
79 	    sctp_clog.x.misc.log1,
80 	    sctp_clog.x.misc.log2,
81 	    sctp_clog.x.misc.log3,
82 	    sctp_clog.x.misc.log4);
83 }
84 
85 void
86 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
87 {
88 	struct sctp_cwnd_log sctp_clog;
89 
90 	sctp_clog.x.close.inp = (void *)inp;
91 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
92 	if (stcb) {
93 		sctp_clog.x.close.stcb = (void *)stcb;
94 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
95 	} else {
96 		sctp_clog.x.close.stcb = 0;
97 		sctp_clog.x.close.state = 0;
98 	}
99 	sctp_clog.x.close.loc = loc;
100 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
101 	    SCTP_LOG_EVENT_CLOSE,
102 	    0,
103 	    sctp_clog.x.misc.log1,
104 	    sctp_clog.x.misc.log2,
105 	    sctp_clog.x.misc.log3,
106 	    sctp_clog.x.misc.log4);
107 }
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 }
125 
126 void
127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128 {
129 	struct sctp_cwnd_log sctp_clog;
130 
131 	sctp_clog.x.strlog.stcb = stcb;
132 	sctp_clog.x.strlog.n_tsn = tsn;
133 	sctp_clog.x.strlog.n_sseq = sseq;
134 	sctp_clog.x.strlog.e_tsn = 0;
135 	sctp_clog.x.strlog.e_sseq = 0;
136 	sctp_clog.x.strlog.strm = stream;
137 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138 	    SCTP_LOG_EVENT_STRM,
139 	    from,
140 	    sctp_clog.x.misc.log1,
141 	    sctp_clog.x.misc.log2,
142 	    sctp_clog.x.misc.log3,
143 	    sctp_clog.x.misc.log4);
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 void
166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167 {
168 	struct sctp_cwnd_log sctp_clog;
169 
170 	sctp_clog.x.sack.cumack = cumack;
171 	sctp_clog.x.sack.oldcumack = old_cumack;
172 	sctp_clog.x.sack.tsn = tsn;
173 	sctp_clog.x.sack.numGaps = gaps;
174 	sctp_clog.x.sack.numDups = dups;
175 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176 	    SCTP_LOG_EVENT_SACK,
177 	    from,
178 	    sctp_clog.x.misc.log1,
179 	    sctp_clog.x.misc.log2,
180 	    sctp_clog.x.misc.log3,
181 	    sctp_clog.x.misc.log4);
182 }
183 
184 void
185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186 {
187 	struct sctp_cwnd_log sctp_clog;
188 
189 	memset(&sctp_clog, 0, sizeof(sctp_clog));
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
204 {
205 	struct sctp_cwnd_log sctp_clog;
206 
207 	memset(&sctp_clog, 0, sizeof(sctp_clog));
208 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210 	sctp_clog.x.fr.tsn = tsn;
211 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212 	    SCTP_LOG_EVENT_FR,
213 	    from,
214 	    sctp_clog.x.misc.log1,
215 	    sctp_clog.x.misc.log2,
216 	    sctp_clog.x.misc.log3,
217 	    sctp_clog.x.misc.log4);
218 }
219 
220 #ifdef SCTP_MBUF_LOGGING
221 void
222 sctp_log_mb(struct mbuf *m, int from)
223 {
224 	struct sctp_cwnd_log sctp_clog;
225 
226 	sctp_clog.x.mb.mp = m;
227 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
228 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
229 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
230 	if (SCTP_BUF_IS_EXTENDED(m)) {
231 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
232 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
233 	} else {
234 		sctp_clog.x.mb.ext = 0;
235 		sctp_clog.x.mb.refcnt = 0;
236 	}
237 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
238 	    SCTP_LOG_EVENT_MBUF,
239 	    from,
240 	    sctp_clog.x.misc.log1,
241 	    sctp_clog.x.misc.log2,
242 	    sctp_clog.x.misc.log3,
243 	    sctp_clog.x.misc.log4);
244 }
245 
246 void
247 sctp_log_mbc(struct mbuf *m, int from)
248 {
249 	struct mbuf *mat;
250 
251 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
252 		sctp_log_mb(mat, from);
253 	}
254 }
255 
256 #endif
257 
258 void
259 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
260 {
261 	struct sctp_cwnd_log sctp_clog;
262 
263 	if (control == NULL) {
264 		SCTP_PRINTF("Gak log of NULL?\n");
265 		return;
266 	}
267 	sctp_clog.x.strlog.stcb = control->stcb;
268 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
269 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
270 	sctp_clog.x.strlog.strm = control->sinfo_stream;
271 	if (poschk != NULL) {
272 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
273 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
274 	} else {
275 		sctp_clog.x.strlog.e_tsn = 0;
276 		sctp_clog.x.strlog.e_sseq = 0;
277 	}
278 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
279 	    SCTP_LOG_EVENT_STRM,
280 	    from,
281 	    sctp_clog.x.misc.log1,
282 	    sctp_clog.x.misc.log2,
283 	    sctp_clog.x.misc.log3,
284 	    sctp_clog.x.misc.log4);
285 }
286 
287 void
288 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
289 {
290 	struct sctp_cwnd_log sctp_clog;
291 
292 	sctp_clog.x.cwnd.net = net;
293 	if (stcb->asoc.send_queue_cnt > 255)
294 		sctp_clog.x.cwnd.cnt_in_send = 255;
295 	else
296 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
297 	if (stcb->asoc.stream_queue_cnt > 255)
298 		sctp_clog.x.cwnd.cnt_in_str = 255;
299 	else
300 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
301 
302 	if (net) {
303 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
304 		sctp_clog.x.cwnd.inflight = net->flight_size;
305 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
306 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
307 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
308 	}
309 	if (SCTP_CWNDLOG_PRESEND == from) {
310 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
311 	}
312 	sctp_clog.x.cwnd.cwnd_augment = augment;
313 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
314 	    SCTP_LOG_EVENT_CWND,
315 	    from,
316 	    sctp_clog.x.misc.log1,
317 	    sctp_clog.x.misc.log2,
318 	    sctp_clog.x.misc.log3,
319 	    sctp_clog.x.misc.log4);
320 }
321 
/*
 * Snapshot which SCTP-related mutexes/rwlocks the current thread owns
 * (tcb, inp, create, global info, socket and socket-buffer locks) and
 * emit the packed result to KTR.  Unknown/unavailable locks are logged
 * as SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx -- presumably because the socket lock
		 * aliases the receive-buffer mutex in this FreeBSD
		 * version; confirm before "fixing".
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
365 
366 void
367 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
368 {
369 	struct sctp_cwnd_log sctp_clog;
370 
371 	memset(&sctp_clog, 0, sizeof(sctp_clog));
372 	sctp_clog.x.cwnd.net = net;
373 	sctp_clog.x.cwnd.cwnd_new_value = error;
374 	sctp_clog.x.cwnd.inflight = net->flight_size;
375 	sctp_clog.x.cwnd.cwnd_augment = burst;
376 	if (stcb->asoc.send_queue_cnt > 255)
377 		sctp_clog.x.cwnd.cnt_in_send = 255;
378 	else
379 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
380 	if (stcb->asoc.stream_queue_cnt > 255)
381 		sctp_clog.x.cwnd.cnt_in_str = 255;
382 	else
383 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
384 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
385 	    SCTP_LOG_EVENT_MAXBURST,
386 	    from,
387 	    sctp_clog.x.misc.log1,
388 	    sctp_clog.x.misc.log2,
389 	    sctp_clog.x.misc.log3,
390 	    sctp_clog.x.misc.log4);
391 }
392 
393 void
394 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
395 {
396 	struct sctp_cwnd_log sctp_clog;
397 
398 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
399 	sctp_clog.x.rwnd.send_size = snd_size;
400 	sctp_clog.x.rwnd.overhead = overhead;
401 	sctp_clog.x.rwnd.new_rwnd = 0;
402 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
403 	    SCTP_LOG_EVENT_RWND,
404 	    from,
405 	    sctp_clog.x.misc.log1,
406 	    sctp_clog.x.misc.log2,
407 	    sctp_clog.x.misc.log3,
408 	    sctp_clog.x.misc.log4);
409 }
410 
411 void
412 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
413 {
414 	struct sctp_cwnd_log sctp_clog;
415 
416 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
417 	sctp_clog.x.rwnd.send_size = flight_size;
418 	sctp_clog.x.rwnd.overhead = overhead;
419 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
420 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
421 	    SCTP_LOG_EVENT_RWND,
422 	    from,
423 	    sctp_clog.x.misc.log1,
424 	    sctp_clog.x.misc.log2,
425 	    sctp_clog.x.misc.log3,
426 	    sctp_clog.x.misc.log4);
427 }
428 
429 #ifdef SCTP_MBCNT_LOGGING
430 static void
431 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
432 {
433 	struct sctp_cwnd_log sctp_clog;
434 
435 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
436 	sctp_clog.x.mbcnt.size_change = book;
437 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
438 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
439 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
440 	    SCTP_LOG_EVENT_MBCNT,
441 	    from,
442 	    sctp_clog.x.misc.log1,
443 	    sctp_clog.x.misc.log2,
444 	    sctp_clog.x.misc.log3,
445 	    sctp_clog.x.misc.log4);
446 }
447 
448 #endif
449 
/*
 * Emit four caller-supplied 32-bit values to the KTR trace stream under
 * the generic "misc" event code; no sctp_cwnd_log packing is involved.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
458 
459 void
460 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
461 {
462 	struct sctp_cwnd_log sctp_clog;
463 
464 	sctp_clog.x.wake.stcb = (void *)stcb;
465 	sctp_clog.x.wake.wake_cnt = wake_cnt;
466 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
467 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
468 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
469 
470 	if (stcb->asoc.stream_queue_cnt < 0xff)
471 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
472 	else
473 		sctp_clog.x.wake.stream_qcnt = 0xff;
474 
475 	if (stcb->asoc.chunks_on_out_queue < 0xff)
476 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
477 	else
478 		sctp_clog.x.wake.chunks_on_oque = 0xff;
479 
480 	sctp_clog.x.wake.sctpflags = 0;
481 	/* set in the defered mode stuff */
482 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
483 		sctp_clog.x.wake.sctpflags |= 1;
484 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
485 		sctp_clog.x.wake.sctpflags |= 2;
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
487 		sctp_clog.x.wake.sctpflags |= 4;
488 	/* what about the sb */
489 	if (stcb->sctp_socket) {
490 		struct socket *so = stcb->sctp_socket;
491 
492 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
493 	} else {
494 		sctp_clog.x.wake.sbflags = 0xff;
495 	}
496 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
497 	    SCTP_LOG_EVENT_WAKE,
498 	    from,
499 	    sctp_clog.x.misc.log1,
500 	    sctp_clog.x.misc.log2,
501 	    sctp_clog.x.misc.log3,
502 	    sctp_clog.x.misc.log4);
503 }
504 
505 void
506 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
507 {
508 	struct sctp_cwnd_log sctp_clog;
509 
510 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
511 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
512 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
513 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
514 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
515 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
516 	sctp_clog.x.blk.sndlen = sendlen;
517 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
518 	    SCTP_LOG_EVENT_BLOCK,
519 	    from,
520 	    sctp_clog.x.misc.log1,
521 	    sctp_clog.x.misc.log2,
522 	    sctp_clog.x.misc.log3,
523 	    sctp_clog.x.misc.log4);
524 }
525 
/*
 * Sockopt handler stub for extracting the stat log; extraction is
 * expected to happen from user space (e.g. via ktrdump) instead, so
 * this always reports success without touching optval/optsize.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
532 
533 #ifdef SCTP_AUDITING_ENABLED
/* Ring buffer of two-byte audit records; see sctp_audit_log(). */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next write slot in sctp_audit_data; wraps to 0 at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
536 
537 static
538 void
539 sctp_print_audit_report(void)
540 {
541 	int i;
542 	int cnt;
543 
544 	cnt = 0;
545 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
546 		if ((sctp_audit_data[i][0] == 0xe0) &&
547 		    (sctp_audit_data[i][1] == 0x01)) {
548 			cnt = 0;
549 			SCTP_PRINTF("\n");
550 		} else if (sctp_audit_data[i][0] == 0xf0) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
554 		    (sctp_audit_data[i][1] == 0x01)) {
555 			SCTP_PRINTF("\n");
556 			cnt = 0;
557 		}
558 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
559 		    (uint32_t) sctp_audit_data[i][1]);
560 		cnt++;
561 		if ((cnt % 14) == 0)
562 			SCTP_PRINTF("\n");
563 	}
564 	for (i = 0; i < sctp_audit_indx; i++) {
565 		if ((sctp_audit_data[i][0] == 0xe0) &&
566 		    (sctp_audit_data[i][1] == 0x01)) {
567 			cnt = 0;
568 			SCTP_PRINTF("\n");
569 		} else if (sctp_audit_data[i][0] == 0xf0) {
570 			cnt = 0;
571 			SCTP_PRINTF("\n");
572 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
573 		    (sctp_audit_data[i][1] == 0x01)) {
574 			SCTP_PRINTF("\n");
575 			cnt = 0;
576 		}
577 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
578 		    (uint32_t) sctp_audit_data[i][1]);
579 		cnt++;
580 		if ((cnt % 14) == 0)
581 			SCTP_PRINTF("\n");
582 	}
583 	SCTP_PRINTF("\n");
584 }
585 
586 void
587 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
588     struct sctp_nets *net)
589 {
590 	int resend_cnt, tot_out, rep, tot_book_cnt;
591 	struct sctp_nets *lnet;
592 	struct sctp_tmit_chunk *chk;
593 
594 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
595 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
596 	sctp_audit_indx++;
597 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
598 		sctp_audit_indx = 0;
599 	}
600 	if (inp == NULL) {
601 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
602 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
603 		sctp_audit_indx++;
604 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
605 			sctp_audit_indx = 0;
606 		}
607 		return;
608 	}
609 	if (stcb == NULL) {
610 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
611 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
612 		sctp_audit_indx++;
613 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
614 			sctp_audit_indx = 0;
615 		}
616 		return;
617 	}
618 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
619 	sctp_audit_data[sctp_audit_indx][1] =
620 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
621 	sctp_audit_indx++;
622 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
623 		sctp_audit_indx = 0;
624 	}
625 	rep = 0;
626 	tot_book_cnt = 0;
627 	resend_cnt = tot_out = 0;
628 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
629 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
630 			resend_cnt++;
631 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
632 			tot_out += chk->book_size;
633 			tot_book_cnt++;
634 		}
635 	}
636 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
637 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
638 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
639 		sctp_audit_indx++;
640 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
641 			sctp_audit_indx = 0;
642 		}
643 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
644 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
645 		rep = 1;
646 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
647 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
648 		sctp_audit_data[sctp_audit_indx][1] =
649 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
650 		sctp_audit_indx++;
651 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
652 			sctp_audit_indx = 0;
653 		}
654 	}
655 	if (tot_out != stcb->asoc.total_flight) {
656 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
657 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
658 		sctp_audit_indx++;
659 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
660 			sctp_audit_indx = 0;
661 		}
662 		rep = 1;
663 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
664 		    (int)stcb->asoc.total_flight);
665 		stcb->asoc.total_flight = tot_out;
666 	}
667 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
668 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
669 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
670 		sctp_audit_indx++;
671 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
672 			sctp_audit_indx = 0;
673 		}
674 		rep = 1;
675 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
676 
677 		stcb->asoc.total_flight_count = tot_book_cnt;
678 	}
679 	tot_out = 0;
680 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
681 		tot_out += lnet->flight_size;
682 	}
683 	if (tot_out != stcb->asoc.total_flight) {
684 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
685 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
686 		sctp_audit_indx++;
687 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
688 			sctp_audit_indx = 0;
689 		}
690 		rep = 1;
691 		SCTP_PRINTF("real flight:%d net total was %d\n",
692 		    stcb->asoc.total_flight, tot_out);
693 		/* now corrective action */
694 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
695 
696 			tot_out = 0;
697 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
698 				if ((chk->whoTo == lnet) &&
699 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
700 					tot_out += chk->book_size;
701 				}
702 			}
703 			if (lnet->flight_size != tot_out) {
704 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
705 				    (void *)lnet, lnet->flight_size,
706 				    tot_out);
707 				lnet->flight_size = tot_out;
708 			}
709 		}
710 	}
711 	if (rep) {
712 		sctp_print_audit_report();
713 	}
714 }
715 
716 void
717 sctp_audit_log(uint8_t ev, uint8_t fd)
718 {
719 
720 	sctp_audit_data[sctp_audit_indx][0] = ev;
721 	sctp_audit_data[sctp_audit_indx][1] = fd;
722 	sctp_audit_indx++;
723 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
724 		sctp_audit_indx = 0;
725 	}
726 }
727 
728 #endif
729 
730 /*
731  * sctp_stop_timers_for_shutdown() should be called
732  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
733  * state to make sure that all timers are stopped.
734  */
735 void
736 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
737 {
738 	struct sctp_association *asoc;
739 	struct sctp_nets *net;
740 
741 	asoc = &stcb->asoc;
742 
743 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
744 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
745 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
746 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
747 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
748 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
749 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
750 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
751 	}
752 }
753 
754 /*
755  * a list of sizes based on typical mtu's, used only if next hop size not
756  * returned.
757  */
758 static uint32_t sctp_mtu_sizes[] = {
759 	68,
760 	296,
761 	508,
762 	512,
763 	544,
764 	576,
765 	1006,
766 	1492,
767 	1500,
768 	1536,
769 	2002,
770 	2048,
771 	4352,
772 	4464,
773 	8166,
774 	17914,
775 	32000,
776 	65535
777 };
778 
779 /*
780  * Return the largest MTU smaller than val. If there is no
781  * entry, just return val.
782  */
783 uint32_t
784 sctp_get_prev_mtu(uint32_t val)
785 {
786 	uint32_t i;
787 
788 	if (val <= sctp_mtu_sizes[0]) {
789 		return (val);
790 	}
791 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
792 		if (val <= sctp_mtu_sizes[i]) {
793 			break;
794 		}
795 	}
796 	return (sctp_mtu_sizes[i - 1]);
797 }
798 
799 /*
800  * Return the smallest MTU larger than val. If there is no
801  * entry, just return val.
802  */
803 uint32_t
804 sctp_get_next_mtu(uint32_t val)
805 {
806 	/* select another MTU that is just bigger than this one */
807 	uint32_t i;
808 
809 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
810 		if (val < sctp_mtu_sizes[i]) {
811 			return (sctp_mtu_sizes[i]);
812 		}
813 	}
814 	return (val);
815 }
816 
/*
 * Refill the PCB's random_store pool by HMACing the endpoint's random
 * numbers with a monotonically increasing counter, then reset the
 * read offset (store_at) so sctp_select_initial_TSN() starts drawing
 * from the fresh bytes.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
835 
836 uint32_t
837 sctp_select_initial_TSN(struct sctp_pcb *inp)
838 {
839 	/*
840 	 * A true implementation should use random selection process to get
841 	 * the initial stream sequence number, using RFC1750 as a good
842 	 * guideline
843 	 */
844 	uint32_t x, *xp;
845 	uint8_t *p;
846 	int store_at, new_store;
847 
848 	if (inp->initial_sequence_debug != 0) {
849 		uint32_t ret;
850 
851 		ret = inp->initial_sequence_debug;
852 		inp->initial_sequence_debug++;
853 		return (ret);
854 	}
855 retry:
856 	store_at = inp->store_at;
857 	new_store = store_at + sizeof(uint32_t);
858 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
859 		new_store = 0;
860 	}
861 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
862 		goto retry;
863 	}
864 	if (new_store == 0) {
865 		/* Refill the random store */
866 		sctp_fill_random_store(inp);
867 	}
868 	p = &inp->random_store[store_at];
869 	xp = (uint32_t *) p;
870 	x = *xp;
871 	return (x);
872 }
873 
874 uint32_t
875 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
876 {
877 	uint32_t x;
878 	struct timeval now;
879 
880 	if (check) {
881 		(void)SCTP_GETTIME_TIMEVAL(&now);
882 	}
883 	for (;;) {
884 		x = sctp_select_initial_TSN(&inp->sctp_ep);
885 		if (x == 0) {
886 			/* we never use 0 */
887 			continue;
888 		}
889 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
890 			break;
891 		}
892 	}
893 	return (x);
894 }
895 
896 int32_t
897 sctp_map_assoc_state(int kernel_state)
898 {
899 	int32_t user_state;
900 
901 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
902 		user_state = SCTP_CLOSED;
903 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
904 		user_state = SCTP_SHUTDOWN_PENDING;
905 	} else {
906 		switch (kernel_state & SCTP_STATE_MASK) {
907 		case SCTP_STATE_EMPTY:
908 			user_state = SCTP_CLOSED;
909 			break;
910 		case SCTP_STATE_INUSE:
911 			user_state = SCTP_CLOSED;
912 			break;
913 		case SCTP_STATE_COOKIE_WAIT:
914 			user_state = SCTP_COOKIE_WAIT;
915 			break;
916 		case SCTP_STATE_COOKIE_ECHOED:
917 			user_state = SCTP_COOKIE_ECHOED;
918 			break;
919 		case SCTP_STATE_OPEN:
920 			user_state = SCTP_ESTABLISHED;
921 			break;
922 		case SCTP_STATE_SHUTDOWN_SENT:
923 			user_state = SCTP_SHUTDOWN_SENT;
924 			break;
925 		case SCTP_STATE_SHUTDOWN_RECEIVED:
926 			user_state = SCTP_SHUTDOWN_RECEIVED;
927 			break;
928 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
929 			user_state = SCTP_SHUTDOWN_ACK_SENT;
930 			break;
931 		default:
932 			user_state = SCTP_CLOSED;
933 			break;
934 		}
935 	}
936 	return (user_state);
937 }
938 
939 int
940 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
941     uint32_t override_tag, uint32_t vrf_id)
942 {
943 	struct sctp_association *asoc;
944 
945 	/*
946 	 * Anything set to zero is taken care of by the allocation routine's
947 	 * bzero
948 	 */
949 
950 	/*
951 	 * Up front select what scoping to apply on addresses I tell my peer
952 	 * Not sure what to do with these right now, we will need to come up
953 	 * with a way to set them. We may need to pass them through from the
954 	 * caller in the sctp_aloc_assoc() function.
955 	 */
956 	int i;
957 
958 #if defined(SCTP_DETAILED_STR_STATS)
959 	int j;
960 
961 #endif
962 
963 	asoc = &stcb->asoc;
964 	/* init all variables to a known value. */
965 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
966 	asoc->max_burst = inp->sctp_ep.max_burst;
967 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
968 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
969 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
970 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
971 	asoc->ecn_supported = inp->ecn_supported;
972 	asoc->prsctp_supported = inp->prsctp_supported;
973 	asoc->auth_supported = inp->auth_supported;
974 	asoc->asconf_supported = inp->asconf_supported;
975 	asoc->reconfig_supported = inp->reconfig_supported;
976 	asoc->nrsack_supported = inp->nrsack_supported;
977 	asoc->pktdrop_supported = inp->pktdrop_supported;
978 	asoc->sctp_cmt_pf = (uint8_t) 0;
979 	asoc->sctp_frag_point = inp->sctp_frag_point;
980 	asoc->sctp_features = inp->sctp_features;
981 	asoc->default_dscp = inp->sctp_ep.default_dscp;
982 	asoc->max_cwnd = inp->max_cwnd;
983 #ifdef INET6
984 	if (inp->sctp_ep.default_flowlabel) {
985 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
986 	} else {
987 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
988 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
989 			asoc->default_flowlabel &= 0x000fffff;
990 			asoc->default_flowlabel |= 0x80000000;
991 		} else {
992 			asoc->default_flowlabel = 0;
993 		}
994 	}
995 #endif
996 	asoc->sb_send_resv = 0;
997 	if (override_tag) {
998 		asoc->my_vtag = override_tag;
999 	} else {
1000 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1001 	}
1002 	/* Get the nonce tags */
1003 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1004 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1005 	asoc->vrf_id = vrf_id;
1006 
1007 #ifdef SCTP_ASOCLOG_OF_TSNS
1008 	asoc->tsn_in_at = 0;
1009 	asoc->tsn_out_at = 0;
1010 	asoc->tsn_in_wrapped = 0;
1011 	asoc->tsn_out_wrapped = 0;
1012 	asoc->cumack_log_at = 0;
1013 	asoc->cumack_log_atsnt = 0;
1014 #endif
1015 #ifdef SCTP_FS_SPEC_LOG
1016 	asoc->fs_index = 0;
1017 #endif
1018 	asoc->refcnt = 0;
1019 	asoc->assoc_up_sent = 0;
1020 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1021 	    sctp_select_initial_TSN(&inp->sctp_ep);
1022 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1023 	/* we are optimisitic here */
1024 	asoc->peer_supports_nat = 0;
1025 	asoc->sent_queue_retran_cnt = 0;
1026 
1027 	/* for CMT */
1028 	asoc->last_net_cmt_send_started = NULL;
1029 
1030 	/* This will need to be adjusted */
1031 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1032 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1033 	asoc->asconf_seq_in = asoc->last_acked_seq;
1034 
1035 	/* here we are different, we hold the next one we expect */
1036 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1037 
1038 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1039 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1040 
1041 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1042 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1043 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1044 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1045 	asoc->free_chunk_cnt = 0;
1046 
1047 	asoc->iam_blocking = 0;
1048 	asoc->context = inp->sctp_context;
1049 	asoc->local_strreset_support = inp->local_strreset_support;
1050 	asoc->def_send = inp->def_send;
1051 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1052 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1053 	asoc->pr_sctp_cnt = 0;
1054 	asoc->total_output_queue_size = 0;
1055 
1056 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1057 		asoc->scope.ipv6_addr_legal = 1;
1058 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1059 			asoc->scope.ipv4_addr_legal = 1;
1060 		} else {
1061 			asoc->scope.ipv4_addr_legal = 0;
1062 		}
1063 	} else {
1064 		asoc->scope.ipv6_addr_legal = 0;
1065 		asoc->scope.ipv4_addr_legal = 1;
1066 	}
1067 
1068 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1069 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1070 
1071 	asoc->smallest_mtu = inp->sctp_frag_point;
1072 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1073 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1074 
1075 	asoc->locked_on_sending = NULL;
1076 	asoc->stream_locked_on = 0;
1077 	asoc->ecn_echo_cnt_onq = 0;
1078 	asoc->stream_locked = 0;
1079 
1080 	asoc->send_sack = 1;
1081 
1082 	LIST_INIT(&asoc->sctp_restricted_addrs);
1083 
1084 	TAILQ_INIT(&asoc->nets);
1085 	TAILQ_INIT(&asoc->pending_reply_queue);
1086 	TAILQ_INIT(&asoc->asconf_ack_sent);
1087 	/* Setup to fill the hb random cache at first HB */
1088 	asoc->hb_random_idx = 4;
1089 
1090 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1091 
1092 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1093 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1094 
1095 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1096 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1097 
1098 	/*
1099 	 * Now the stream parameters, here we allocate space for all streams
1100 	 * that we request by default.
1101 	 */
1102 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1103 	    inp->sctp_ep.pre_open_stream_count;
1104 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1105 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1106 	    SCTP_M_STRMO);
1107 	if (asoc->strmout == NULL) {
1108 		/* big trouble no memory */
1109 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1110 		return (ENOMEM);
1111 	}
1112 	for (i = 0; i < asoc->streamoutcnt; i++) {
1113 		/*
1114 		 * inbound side must be set to 0xffff, also NOTE when we get
1115 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1116 		 * count (streamoutcnt) but first check if we sent to any of
1117 		 * the upper streams that were dropped (if some were). Those
1118 		 * that were dropped must be notified to the upper layer as
1119 		 * failed to send.
1120 		 */
1121 		asoc->strmout[i].next_sequence_send = 0x0;
1122 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1123 		asoc->strmout[i].chunks_on_queues = 0;
1124 #if defined(SCTP_DETAILED_STR_STATS)
1125 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1126 			asoc->strmout[i].abandoned_sent[j] = 0;
1127 			asoc->strmout[i].abandoned_unsent[j] = 0;
1128 		}
1129 #else
1130 		asoc->strmout[i].abandoned_sent[0] = 0;
1131 		asoc->strmout[i].abandoned_unsent[0] = 0;
1132 #endif
1133 		asoc->strmout[i].stream_no = i;
1134 		asoc->strmout[i].last_msg_incomplete = 0;
1135 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1136 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1137 	}
1138 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1139 
1140 	/* Now the mapping array */
1141 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1142 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1143 	    SCTP_M_MAP);
1144 	if (asoc->mapping_array == NULL) {
1145 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1146 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1147 		return (ENOMEM);
1148 	}
1149 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1150 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1151 	    SCTP_M_MAP);
1152 	if (asoc->nr_mapping_array == NULL) {
1153 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1154 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1155 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1156 		return (ENOMEM);
1157 	}
1158 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1159 
1160 	/* Now the init of the other outqueues */
1161 	TAILQ_INIT(&asoc->free_chunks);
1162 	TAILQ_INIT(&asoc->control_send_queue);
1163 	TAILQ_INIT(&asoc->asconf_send_queue);
1164 	TAILQ_INIT(&asoc->send_queue);
1165 	TAILQ_INIT(&asoc->sent_queue);
1166 	TAILQ_INIT(&asoc->reasmqueue);
1167 	TAILQ_INIT(&asoc->resetHead);
1168 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1169 	TAILQ_INIT(&asoc->asconf_queue);
1170 	/* authentication fields */
1171 	asoc->authinfo.random = NULL;
1172 	asoc->authinfo.active_keyid = 0;
1173 	asoc->authinfo.assoc_key = NULL;
1174 	asoc->authinfo.assoc_keyid = 0;
1175 	asoc->authinfo.recv_key = NULL;
1176 	asoc->authinfo.recv_keyid = 0;
1177 	LIST_INIT(&asoc->shared_keys);
1178 	asoc->marked_retrans = 0;
1179 	asoc->port = inp->sctp_ep.port;
1180 	asoc->timoinit = 0;
1181 	asoc->timodata = 0;
1182 	asoc->timosack = 0;
1183 	asoc->timoshutdown = 0;
1184 	asoc->timoheartbeat = 0;
1185 	asoc->timocookie = 0;
1186 	asoc->timoshutdownack = 0;
1187 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1188 	asoc->discontinuity_time = asoc->start_time;
1189 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1190 		asoc->abandoned_unsent[i] = 0;
1191 		asoc->abandoned_sent[i] = 0;
1192 	}
1193 	/*
1194 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1195 	 * freed later when the association is freed.
1196 	 */
1197 	return (0);
1198 }
1199 
1200 void
1201 sctp_print_mapping_array(struct sctp_association *asoc)
1202 {
1203 	unsigned int i, limit;
1204 
1205 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1206 	    asoc->mapping_array_size,
1207 	    asoc->mapping_array_base_tsn,
1208 	    asoc->cumulative_tsn,
1209 	    asoc->highest_tsn_inside_map,
1210 	    asoc->highest_tsn_inside_nr_map);
1211 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1212 		if (asoc->mapping_array[limit - 1] != 0) {
1213 			break;
1214 		}
1215 	}
1216 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1217 	for (i = 0; i < limit; i++) {
1218 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1219 	}
1220 	if (limit % 16)
1221 		SCTP_PRINTF("\n");
1222 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1223 		if (asoc->nr_mapping_array[limit - 1]) {
1224 			break;
1225 		}
1226 	}
1227 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1228 	for (i = 0; i < limit; i++) {
1229 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1230 	}
1231 	if (limit % 16)
1232 		SCTP_PRINTF("\n");
1233 }
1234 
1235 int
1236 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1237 {
1238 	/* mapping array needs to grow */
1239 	uint8_t *new_array1, *new_array2;
1240 	uint32_t new_size;
1241 
1242 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1243 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1244 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1245 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1246 		/* can't get more, forget it */
1247 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1248 		if (new_array1) {
1249 			SCTP_FREE(new_array1, SCTP_M_MAP);
1250 		}
1251 		if (new_array2) {
1252 			SCTP_FREE(new_array2, SCTP_M_MAP);
1253 		}
1254 		return (-1);
1255 	}
1256 	memset(new_array1, 0, new_size);
1257 	memset(new_array2, 0, new_size);
1258 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1259 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1260 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1261 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1262 	asoc->mapping_array = new_array1;
1263 	asoc->nr_mapping_array = new_array2;
1264 	asoc->mapping_array_size = new_size;
1265 	return (0);
1266 }
1267 
1268 
/*
 * Core of the association iterator: walk the endpoint (inp) list and,
 * for each endpoint matching it->pcb_flags/it->pcb_features, visit every
 * association in it->asoc_state, invoking the caller-supplied callbacks
 * (function_inp, function_assoc, function_inp_end, function_atend).
 *
 * Locking: runs under the INP-INFO read lock and the global iterator
 * lock; each endpoint is visited under its INP read lock and each
 * association under its TCB lock.  Every SCTP_ITERATOR_MAX_AT_ONCE
 * associations the locks are dropped and reacquired to let other
 * threads make progress; sctp_it_ctl.iterator_flags is rechecked after
 * that window in case someone asked us to stop.
 *
 * The iterator structure itself is freed here when the walk completes.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/* Drop the reference taken when the iterator was queued. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* it->inp is already read-locked from above. */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* Advance to the next endpoint before unlocking this one. */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		/* Per-endpoint callback runs once per endpoint. */
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Endpoint-level callback asked to skip, or no assocs. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Hold a refcount on the stcb and a ref on the inp so
			 * neither can go away while all locks are dropped.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Reacquire in the same order and drop the temp refs. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1416 
/*
 * Drain the queue of pending iterators: dequeue each one and run it via
 * sctp_iterator_work() in the vnet it was created in.  The iterator
 * structure is freed inside sctp_iterator_work() when it completes.
 *
 * NOTE(review): the WQ lock is dropped while an iterator runs; the SAFE
 * traversal's cached 'nit' is presumably protected by iterator_running
 * gating concurrent queue manipulation — confirm against the enqueue path.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		sctp_it_ctl.cur_it = it;
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		sctp_it_ctl.cur_it = NULL;
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
1440 
1441 
1442 static void
1443 sctp_handle_addr_wq(void)
1444 {
1445 	/* deal with the ADDR wq from the rtsock calls */
1446 	struct sctp_laddr *wi, *nwi;
1447 	struct sctp_asconf_iterator *asc;
1448 
1449 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1450 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1451 	if (asc == NULL) {
1452 		/* Try later, no memory */
1453 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1454 		    (struct sctp_inpcb *)NULL,
1455 		    (struct sctp_tcb *)NULL,
1456 		    (struct sctp_nets *)NULL);
1457 		return;
1458 	}
1459 	LIST_INIT(&asc->list_of_work);
1460 	asc->cnt = 0;
1461 
1462 	SCTP_WQ_ADDR_LOCK();
1463 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1464 		LIST_REMOVE(wi, sctp_nxt_addr);
1465 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1466 		asc->cnt++;
1467 	}
1468 	SCTP_WQ_ADDR_UNLOCK();
1469 
1470 	if (asc->cnt == 0) {
1471 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1472 	} else {
1473 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1474 		    sctp_asconf_iterator_stcb,
1475 		    NULL,	/* No ep end for boundall */
1476 		    SCTP_PCB_FLAGS_BOUNDALL,
1477 		    SCTP_PCB_ANY_FEATURES,
1478 		    SCTP_ASOC_ANY_STATE,
1479 		    (void *)asc, 0,
1480 		    sctp_asconf_iterator_end, NULL, 0);
1481 	}
1482 }
1483 
/*
 * Central callout handler for every SCTP timer type.
 *
 * 't' is really a struct sctp_timer; from it we recover the endpoint
 * (inp), association (stcb) and destination address (net) the timer was
 * armed against, validate that they are still alive, take the TCB lock
 * and a temporary refcount where needed, and then dispatch on tmr->type.
 * The successive tmr->stopped_from writes (0xa001..0xa006) are debug
 * breadcrumbs recording how far the handler got before bailing out.
 *
 * NOTE(review): several handlers (ASOCKILL, INPKILL, SHUTDOWNGUARD, and
 * any per-type timer function returning non-zero) free the stcb or inp
 * themselves; those paths jump to out_decr/out_no_decr to avoid
 * unlocking or dereferencing a freed object.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Only the ADDR_WQ timer may fire without an endpoint attached. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (inp) {
		/*
		 * Pin the endpoint; if its socket is gone, only the timer
		 * types listed below are still meaningful.
		 */
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the association; state 0 means it is being torn down. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped after it fired; drop refs and bail. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/*
		 * Take the TCB lock (the refcount above kept the stcb alive
		 * until we could), then recheck that it is not being freed.
		 */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timeout. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timeout. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timeout. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/* Re-arm only if heartbeats are still enabled on this path. */
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/* COOKIE-ECHO retransmission timeout. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Shutdown took too long: abort the association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Shutdown guard timer expired");
		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Time to free the association itself. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    tmr->type);
	CURVNET_RESTORE();
}
1928 
1929 void
1930 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1931     struct sctp_nets *net)
1932 {
1933 	uint32_t to_ticks;
1934 	struct sctp_timer *tmr;
1935 
1936 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1937 		return;
1938 
1939 	tmr = NULL;
1940 	if (stcb) {
1941 		SCTP_TCB_LOCK_ASSERT(stcb);
1942 	}
1943 	switch (t_type) {
1944 	case SCTP_TIMER_TYPE_ZERO_COPY:
1945 		tmr = &inp->sctp_ep.zero_copy_timer;
1946 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1947 		break;
1948 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1949 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1950 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1951 		break;
1952 	case SCTP_TIMER_TYPE_ADDR_WQ:
1953 		/* Only 1 tick away :-) */
1954 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1955 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1956 		break;
1957 	case SCTP_TIMER_TYPE_SEND:
1958 		/* Here we use the RTO timer */
1959 		{
1960 			int rto_val;
1961 
1962 			if ((stcb == NULL) || (net == NULL)) {
1963 				return;
1964 			}
1965 			tmr = &net->rxt_timer;
1966 			if (net->RTO == 0) {
1967 				rto_val = stcb->asoc.initial_rto;
1968 			} else {
1969 				rto_val = net->RTO;
1970 			}
1971 			to_ticks = MSEC_TO_TICKS(rto_val);
1972 		}
1973 		break;
1974 	case SCTP_TIMER_TYPE_INIT:
1975 		/*
1976 		 * Here we use the INIT timer default usually about 1
1977 		 * minute.
1978 		 */
1979 		if ((stcb == NULL) || (net == NULL)) {
1980 			return;
1981 		}
1982 		tmr = &net->rxt_timer;
1983 		if (net->RTO == 0) {
1984 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1985 		} else {
1986 			to_ticks = MSEC_TO_TICKS(net->RTO);
1987 		}
1988 		break;
1989 	case SCTP_TIMER_TYPE_RECV:
1990 		/*
1991 		 * Here we use the Delayed-Ack timer value from the inp
1992 		 * ususually about 200ms.
1993 		 */
1994 		if (stcb == NULL) {
1995 			return;
1996 		}
1997 		tmr = &stcb->asoc.dack_timer;
1998 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1999 		break;
2000 	case SCTP_TIMER_TYPE_SHUTDOWN:
2001 		/* Here we use the RTO of the destination. */
2002 		if ((stcb == NULL) || (net == NULL)) {
2003 			return;
2004 		}
2005 		if (net->RTO == 0) {
2006 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2007 		} else {
2008 			to_ticks = MSEC_TO_TICKS(net->RTO);
2009 		}
2010 		tmr = &net->rxt_timer;
2011 		break;
2012 	case SCTP_TIMER_TYPE_HEARTBEAT:
2013 		/*
2014 		 * the net is used here so that we can add in the RTO. Even
2015 		 * though we use a different timer. We also add the HB timer
2016 		 * PLUS a random jitter.
2017 		 */
2018 		if ((stcb == NULL) || (net == NULL)) {
2019 			return;
2020 		} else {
2021 			uint32_t rndval;
2022 			uint32_t jitter;
2023 
2024 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2025 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2026 				return;
2027 			}
2028 			if (net->RTO == 0) {
2029 				to_ticks = stcb->asoc.initial_rto;
2030 			} else {
2031 				to_ticks = net->RTO;
2032 			}
2033 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2034 			jitter = rndval % to_ticks;
2035 			if (jitter >= (to_ticks >> 1)) {
2036 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2037 			} else {
2038 				to_ticks = to_ticks - jitter;
2039 			}
2040 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2041 			    !(net->dest_state & SCTP_ADDR_PF)) {
2042 				to_ticks += net->heart_beat_delay;
2043 			}
2044 			/*
2045 			 * Now we must convert the to_ticks that are now in
2046 			 * ms to ticks.
2047 			 */
2048 			to_ticks = MSEC_TO_TICKS(to_ticks);
2049 			tmr = &net->hb_timer;
2050 		}
2051 		break;
2052 	case SCTP_TIMER_TYPE_COOKIE:
2053 		/*
2054 		 * Here we can use the RTO timer from the network since one
2055 		 * RTT was compelete. If a retran happened then we will be
2056 		 * using the RTO initial value.
2057 		 */
2058 		if ((stcb == NULL) || (net == NULL)) {
2059 			return;
2060 		}
2061 		if (net->RTO == 0) {
2062 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2063 		} else {
2064 			to_ticks = MSEC_TO_TICKS(net->RTO);
2065 		}
2066 		tmr = &net->rxt_timer;
2067 		break;
2068 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2069 		/*
2070 		 * nothing needed but the endpoint here ususually about 60
2071 		 * minutes.
2072 		 */
2073 		tmr = &inp->sctp_ep.signature_change;
2074 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2075 		break;
2076 	case SCTP_TIMER_TYPE_ASOCKILL:
2077 		if (stcb == NULL) {
2078 			return;
2079 		}
2080 		tmr = &stcb->asoc.strreset_timer;
2081 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2082 		break;
2083 	case SCTP_TIMER_TYPE_INPKILL:
2084 		/*
2085 		 * The inp is setup to die. We re-use the signature_chage
2086 		 * timer since that has stopped and we are in the GONE
2087 		 * state.
2088 		 */
2089 		tmr = &inp->sctp_ep.signature_change;
2090 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2091 		break;
2092 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2093 		/*
2094 		 * Here we use the value found in the EP for PMTU ususually
2095 		 * about 10 minutes.
2096 		 */
2097 		if ((stcb == NULL) || (net == NULL)) {
2098 			return;
2099 		}
2100 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2101 			return;
2102 		}
2103 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2104 		tmr = &net->pmtu_timer;
2105 		break;
2106 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2107 		/* Here we use the RTO of the destination */
2108 		if ((stcb == NULL) || (net == NULL)) {
2109 			return;
2110 		}
2111 		if (net->RTO == 0) {
2112 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2113 		} else {
2114 			to_ticks = MSEC_TO_TICKS(net->RTO);
2115 		}
2116 		tmr = &net->rxt_timer;
2117 		break;
2118 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2119 		/*
2120 		 * Here we use the endpoints shutdown guard timer usually
2121 		 * about 3 minutes.
2122 		 */
2123 		if (stcb == NULL) {
2124 			return;
2125 		}
2126 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2127 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2128 		} else {
2129 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2130 		}
2131 		tmr = &stcb->asoc.shut_guard_timer;
2132 		break;
2133 	case SCTP_TIMER_TYPE_STRRESET:
2134 		/*
2135 		 * Here the timer comes from the stcb but its value is from
2136 		 * the net's RTO.
2137 		 */
2138 		if ((stcb == NULL) || (net == NULL)) {
2139 			return;
2140 		}
2141 		if (net->RTO == 0) {
2142 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2143 		} else {
2144 			to_ticks = MSEC_TO_TICKS(net->RTO);
2145 		}
2146 		tmr = &stcb->asoc.strreset_timer;
2147 		break;
2148 	case SCTP_TIMER_TYPE_ASCONF:
2149 		/*
2150 		 * Here the timer comes from the stcb but its value is from
2151 		 * the net's RTO.
2152 		 */
2153 		if ((stcb == NULL) || (net == NULL)) {
2154 			return;
2155 		}
2156 		if (net->RTO == 0) {
2157 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2158 		} else {
2159 			to_ticks = MSEC_TO_TICKS(net->RTO);
2160 		}
2161 		tmr = &stcb->asoc.asconf_timer;
2162 		break;
2163 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2164 		if ((stcb == NULL) || (net != NULL)) {
2165 			return;
2166 		}
2167 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2168 		tmr = &stcb->asoc.delete_prim_timer;
2169 		break;
2170 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2171 		if (stcb == NULL) {
2172 			return;
2173 		}
2174 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2175 			/*
2176 			 * Really an error since stcb is NOT set to
2177 			 * autoclose
2178 			 */
2179 			return;
2180 		}
2181 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2182 		tmr = &stcb->asoc.autoclose_timer;
2183 		break;
2184 	default:
2185 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2186 		    __FUNCTION__, t_type);
2187 		return;
2188 		break;
2189 	}
2190 	if ((to_ticks <= 0) || (tmr == NULL)) {
2191 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2192 		    __FUNCTION__, t_type, to_ticks, (void *)tmr);
2193 		return;
2194 	}
2195 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2196 		/*
2197 		 * we do NOT allow you to have it already running. if it is
2198 		 * we leave the current one up unchanged
2199 		 */
2200 		return;
2201 	}
2202 	/* At this point we can proceed */
2203 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2204 		stcb->asoc.num_send_timers_up++;
2205 	}
2206 	tmr->stopped_from = 0;
2207 	tmr->type = t_type;
2208 	tmr->ep = (void *)inp;
2209 	tmr->tcb = (void *)stcb;
2210 	tmr->net = (void *)net;
2211 	tmr->self = (void *)tmr;
2212 	tmr->vnet = (void *)curvnet;
2213 	tmr->ticks = sctp_get_tick_count();
2214 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2215 	return;
2216 }
2217 
/*
 * Stop a timer of type t_type for the given endpoint/association/
 * destination.  The timer structure is selected by t_type exactly as
 * in sctp_timer_start().  Some timer structures are shared between
 * types (rxt_timer by SEND/INIT/COOKIE/SHUTDOWN/SHUTDOWN-ACK,
 * strreset_timer by STRRESET and ASOCKILL, signature_change by
 * NEWCOOKIE and INPKILL), so a timer currently running as a different
 * type is deliberately left untouched.  'from' records the caller's
 * location in tmr->stopped_from for debugging.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* All timer types except the address workqueue need an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Select the timer structure that t_type runs on. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* Keep the outstanding SEND-timer count consistent (floor 0). */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	/* Clearing self marks the timer as no longer armed by us. */
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2374 
2375 uint32_t
2376 sctp_calculate_len(struct mbuf *m)
2377 {
2378 	uint32_t tlen = 0;
2379 	struct mbuf *at;
2380 
2381 	at = m;
2382 	while (at) {
2383 		tlen += SCTP_BUF_LEN(at);
2384 		at = SCTP_BUF_NEXT(at);
2385 	}
2386 	return (tlen);
2387 }
2388 
2389 void
2390 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2391     struct sctp_association *asoc, uint32_t mtu)
2392 {
2393 	/*
2394 	 * Reset the P-MTU size on this association, this involves changing
2395 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2396 	 * allow the DF flag to be cleared.
2397 	 */
2398 	struct sctp_tmit_chunk *chk;
2399 	unsigned int eff_mtu, ovh;
2400 
2401 	asoc->smallest_mtu = mtu;
2402 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2403 		ovh = SCTP_MIN_OVERHEAD;
2404 	} else {
2405 		ovh = SCTP_MIN_V4_OVERHEAD;
2406 	}
2407 	eff_mtu = mtu - ovh;
2408 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2409 		if (chk->send_size > eff_mtu) {
2410 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2411 		}
2412 	}
2413 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2414 		if (chk->send_size > eff_mtu) {
2415 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2416 		}
2417 	}
2418 }
2419 
2420 
2421 /*
2422  * given an association and starting time of the current RTT period return
2423  * RTO in number of msecs net should point to the current network
2424  */
2425 
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe, int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 * 'safe' selects whether 'told' must be copied to an aligned local
	 * before use (sctp_align_unsafe_makecopy) or can be used directly
	 * (sctp_align_safe_nocopy).  'rtt_from_sack' tells whether the
	 * measurement came from data (SACK) or from control traffic.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error: neither recognized 'safe' mode was given */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* now = elapsed time since 'old' (the start of the RTT period) */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
	        (uint64_t) now.tv_usec;

	/* compute rtt in ms */
	rtt = (int32_t) (net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* Subsequent measurement: exponentially smooth sa and sv. */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurement: seed sa/sv directly from this RTT. */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	if (net->lastsv == 0) {
		/* Variance must be at least the clock granularity. */
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	/* RTO = srtt + rttvar (scaled state unpacked here) */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		/* Once we leave satellite mode, lock out re-entering it. */
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2541 
2542 /*
2543  * return a pointer to a contiguous piece of data from the given mbuf chain
2544  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2545  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2546  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2547  */
2548 caddr_t
2549 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2550 {
2551 	uint32_t count;
2552 	uint8_t *ptr;
2553 
2554 	ptr = in_ptr;
2555 	if ((off < 0) || (len <= 0))
2556 		return (NULL);
2557 
2558 	/* find the desired start location */
2559 	while ((m != NULL) && (off > 0)) {
2560 		if (off < SCTP_BUF_LEN(m))
2561 			break;
2562 		off -= SCTP_BUF_LEN(m);
2563 		m = SCTP_BUF_NEXT(m);
2564 	}
2565 	if (m == NULL)
2566 		return (NULL);
2567 
2568 	/* is the current mbuf large enough (eg. contiguous)? */
2569 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2570 		return (mtod(m, caddr_t)+off);
2571 	} else {
2572 		/* else, it spans more than one mbuf, so save a temp copy... */
2573 		while ((m != NULL) && (len > 0)) {
2574 			count = min(SCTP_BUF_LEN(m) - off, len);
2575 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2576 			len -= count;
2577 			ptr += count;
2578 			off = 0;
2579 			m = SCTP_BUF_NEXT(m);
2580 		}
2581 		if ((m == NULL) && (len > 0))
2582 			return (NULL);
2583 		else
2584 			return ((caddr_t)in_ptr);
2585 	}
2586 }
2587 
2588 
2589 
2590 struct sctp_paramhdr *
2591 sctp_get_next_param(struct mbuf *m,
2592     int offset,
2593     struct sctp_paramhdr *pull,
2594     int pull_limit)
2595 {
2596 	/* This just provides a typed signature to Peter's Pull routine */
2597 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2598 	    (uint8_t *) pull));
2599 }
2600 
2601 
2602 struct mbuf *
2603 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2604 {
2605 	struct mbuf *m_last;
2606 	caddr_t dp;
2607 
2608 	if (padlen > 3) {
2609 		return (NULL);
2610 	}
2611 	if (padlen <= M_TRAILINGSPACE(m)) {
2612 		/*
2613 		 * The easy way. We hope the majority of the time we hit
2614 		 * here :)
2615 		 */
2616 		m_last = m;
2617 	} else {
2618 		/* Hard way we must grow the mbuf chain */
2619 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2620 		if (m_last == NULL) {
2621 			return (NULL);
2622 		}
2623 		SCTP_BUF_LEN(m_last) = 0;
2624 		SCTP_BUF_NEXT(m_last) = NULL;
2625 		SCTP_BUF_NEXT(m) = m_last;
2626 	}
2627 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2628 	SCTP_BUF_LEN(m_last) += padlen;
2629 	memset(dp, 0, padlen);
2630 	return (m_last);
2631 }
2632 
2633 struct mbuf *
2634 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2635 {
2636 	/* find the last mbuf in chain and pad it */
2637 	struct mbuf *m_at;
2638 
2639 	if (last_mbuf != NULL) {
2640 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2641 	} else {
2642 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2643 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2644 				return (sctp_add_pad_tombuf(m_at, padval));
2645 			}
2646 		}
2647 	}
2648 	return (NULL);
2649 }
2650 
/*
 * Queue an SCTP_ASSOC_CHANGE notification on the socket's read queue
 * when the application enabled SCTP_PCB_FLAGS_RECVASSOCEVNT.  For
 * COMM_UP/RESTART the supported-features list is appended to the
 * notification; for COMM_LOST/CANT_STR_ASSOC the received ABORT chunk
 * (if any) is appended.  Independently of the notification being
 * enabled, 1-to-1 style sockets get so_error set on failure states
 * and any sleepers on the socket are woken.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	size_t notif_len, abort_len;
	unsigned int i;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
		} else {
			abort_len = 0;
		}
		/* Reserve room for the variable-length sac_info payload. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		/* Only fill sac_info if the full-size allocation succeeded. */
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			/* not that we need this */
			control->tail_mbuf = m_notify;
			control->spec_flags = M_NOTIFICATION;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up and error when an ABORT
	 * comes in.  Note: this path runs even when the notification
	 * event above is disabled.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			/* Peer-initiated failure: refused vs. reset. */
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			/* Local failure: setup timeout vs. local abort. */
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* Hold a ref across the unlock/relock dance for the socket lock. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore_locked(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2798 
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification (new 'state' and
 * 'error' for peer address 'sa') on the socket's read queue, if the
 * application enabled SCTP_PCB_FLAGS_RECVPADDREVNT.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the peer address into spc_aaddr in the form the app expects. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		/* v6 sockets may want v4 addresses as v4-mapped v6. */
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2890 
2891 
2892 static void
2893 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2894     struct sctp_tmit_chunk *chk, int so_locked
2895 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2896     SCTP_UNUSED
2897 #endif
2898 )
2899 {
2900 	struct mbuf *m_notify;
2901 	struct sctp_send_failed *ssf;
2902 	struct sctp_send_failed_event *ssfe;
2903 	struct sctp_queued_to_read *control;
2904 	int length;
2905 
2906 	if ((stcb == NULL) ||
2907 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2908 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2909 		/* event not enabled */
2910 		return;
2911 	}
2912 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2913 		length = sizeof(struct sctp_send_failed_event);
2914 	} else {
2915 		length = sizeof(struct sctp_send_failed);
2916 	}
2917 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
2918 	if (m_notify == NULL)
2919 		/* no space left */
2920 		return;
2921 	SCTP_BUF_LEN(m_notify) = 0;
2922 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2923 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2924 		memset(ssfe, 0, length);
2925 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2926 		if (sent) {
2927 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2928 		} else {
2929 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2930 		}
2931 		length += chk->send_size;
2932 		length -= sizeof(struct sctp_data_chunk);
2933 		ssfe->ssfe_length = length;
2934 		ssfe->ssfe_error = error;
2935 		/* not exactly what the user sent in, but should be close :) */
2936 		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
2937 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2938 		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
2939 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2940 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2941 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2942 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
2943 	} else {
2944 		ssf = mtod(m_notify, struct sctp_send_failed *);
2945 		memset(ssf, 0, length);
2946 		ssf->ssf_type = SCTP_SEND_FAILED;
2947 		if (sent) {
2948 			ssf->ssf_flags = SCTP_DATA_SENT;
2949 		} else {
2950 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2951 		}
2952 		length += chk->send_size;
2953 		length -= sizeof(struct sctp_data_chunk);
2954 		ssf->ssf_length = length;
2955 		ssf->ssf_error = error;
2956 		/* not exactly what the user sent in, but should be close :) */
2957 		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
2958 		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2959 		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2960 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2961 		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2962 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
2963 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2964 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
2965 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2966 	}
2967 	if (chk->data) {
2968 		/*
2969 		 * trim off the sctp chunk header(it should be there)
2970 		 */
2971 		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
2972 			m_adj(chk->data, sizeof(struct sctp_data_chunk));
2973 			sctp_mbuf_crush(chk->data);
2974 			chk->send_size -= sizeof(struct sctp_data_chunk);
2975 		}
2976 	}
2977 	SCTP_BUF_NEXT(m_notify) = chk->data;
2978 	/* Steal off the mbuf */
2979 	chk->data = NULL;
2980 	/*
2981 	 * For this case, we check the actual socket buffer, since the assoc
2982 	 * is going away we don't want to overfill the socket buffer for a
2983 	 * non-reader
2984 	 */
2985 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
2986 		sctp_m_freem(m_notify);
2987 		return;
2988 	}
2989 	/* append to socket */
2990 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2991 	    0, 0, stcb->asoc.context, 0, 0, 0,
2992 	    m_notify);
2993 	if (control == NULL) {
2994 		/* no memory */
2995 		sctp_m_freem(m_notify);
2996 		return;
2997 	}
2998 	control->spec_flags = M_NOTIFICATION;
2999 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3000 	    control,
3001 	    &stcb->sctp_socket->so_rcv, 1,
3002 	    SCTP_READ_LOCK_NOT_HELD,
3003 	    so_locked);
3004 }
3005 
3006 
/*
 * Queue a SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification for a
 * message that was still pending on a stream output queue (i.e. never
 * sent), handing the undelivered user data back to the application.
 * No-op if neither the old- nor new-style send-failed event is enabled.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* Pick the notification format the application subscribed to. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* New-style sctp_send_failed_event notification. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		/* Reported length covers the header plus the returned data. */
		length += sp->length;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->stream;
		if (sp->some_taken) {
			/* part of the message was already chunked off */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		/* Legacy sctp_send_failed notification. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		length += sp->length;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	/* Chain the user's data behind the notification header mbuf. */
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* Freeing m_notify also frees the chained (stolen) data. */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3107 
3108 
3109 
3110 static void
3111 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3112 {
3113 	struct mbuf *m_notify;
3114 	struct sctp_adaptation_event *sai;
3115 	struct sctp_queued_to_read *control;
3116 
3117 	if ((stcb == NULL) ||
3118 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3119 		/* event not enabled */
3120 		return;
3121 	}
3122 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3123 	if (m_notify == NULL)
3124 		/* no space left */
3125 		return;
3126 	SCTP_BUF_LEN(m_notify) = 0;
3127 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3128 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3129 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3130 	sai->sai_flags = 0;
3131 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3132 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3133 	sai->sai_assoc_id = sctp_get_associd(stcb);
3134 
3135 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3136 	SCTP_BUF_NEXT(m_notify) = NULL;
3137 
3138 	/* append to socket */
3139 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3140 	    0, 0, stcb->asoc.context, 0, 0, 0,
3141 	    m_notify);
3142 	if (control == NULL) {
3143 		/* no memory */
3144 		sctp_m_freem(m_notify);
3145 		return;
3146 	}
3147 	control->length = SCTP_BUF_LEN(m_notify);
3148 	control->spec_flags = M_NOTIFICATION;
3149 	/* not that we need this */
3150 	control->tail_mbuf = m_notify;
3151 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3152 	    control,
3153 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3154 }
3155 
/* This always must be called with the read-queue LOCKED in the INP */
/*
 * Post an SCTP_PARTIAL_DELIVERY_EVENT to the read queue.  Unlike the
 * other notifiers, this inserts the entry into the inp read queue by
 * hand (next to any in-progress partial-delivery entry) instead of
 * going through sctp_add_to_readq(), because the caller already holds
 * the read-queue lock.  'val' packs the stream id in the upper 16 bits
 * and the sequence number in the lower 16 bits.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* nobody can read the notification anymore */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* unpack stream id / ssn from the combined value */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	/*
	 * NOTE(review): this assignment is a dead store -- control->length
	 * is reset to 0 a few lines below and then re-accumulated via
	 * atomic_add_int() once the mbuf is accounted into the sockbuf.
	 */
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* charge the notification against the socket buffer */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	/* keep the event adjacent to the in-progress pdapi entry */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Lock ordering: drop the TCB lock before taking
			 * the socket lock, holding a refcount so the
			 * association cannot be freed underneath us.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		/* wake any reader blocked on the socket */
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3251 
/*
 * Post an SCTP_SHUTDOWN_EVENT notification.  For one-to-one style
 * (and connected one-to-many) sockets, additionally mark the socket as
 * unable to send, waking up any blocked writers.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/*
		 * Lock ordering: drop the TCB lock before taking the
		 * socket lock, holding a refcount so the association
		 * cannot be freed underneath us.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket closed while we slept; nothing to do */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3320 
3321 static void
3322 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3323     int so_locked
3324 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3325     SCTP_UNUSED
3326 #endif
3327 )
3328 {
3329 	struct mbuf *m_notify;
3330 	struct sctp_sender_dry_event *event;
3331 	struct sctp_queued_to_read *control;
3332 
3333 	if ((stcb == NULL) ||
3334 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3335 		/* event not enabled */
3336 		return;
3337 	}
3338 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3339 	if (m_notify == NULL) {
3340 		/* no space left */
3341 		return;
3342 	}
3343 	SCTP_BUF_LEN(m_notify) = 0;
3344 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3345 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3346 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3347 	event->sender_dry_flags = 0;
3348 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3349 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3350 
3351 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3352 	SCTP_BUF_NEXT(m_notify) = NULL;
3353 
3354 	/* append to socket */
3355 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3356 	    0, 0, stcb->asoc.context, 0, 0, 0,
3357 	    m_notify);
3358 	if (control == NULL) {
3359 		/* no memory */
3360 		sctp_m_freem(m_notify);
3361 		return;
3362 	}
3363 	control->length = SCTP_BUF_LEN(m_notify);
3364 	control->spec_flags = M_NOTIFICATION;
3365 	/* not that we need this */
3366 	control->tail_mbuf = m_notify;
3367 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3368 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3369 }
3370 
3371 
3372 void
3373 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3374 {
3375 	struct mbuf *m_notify;
3376 	struct sctp_queued_to_read *control;
3377 	struct sctp_stream_change_event *stradd;
3378 
3379 	if ((stcb == NULL) ||
3380 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3381 		/* event not enabled */
3382 		return;
3383 	}
3384 	if ((stcb->asoc.peer_req_out) && flag) {
3385 		/* Peer made the request, don't tell the local user */
3386 		stcb->asoc.peer_req_out = 0;
3387 		return;
3388 	}
3389 	stcb->asoc.peer_req_out = 0;
3390 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3391 	if (m_notify == NULL)
3392 		/* no space left */
3393 		return;
3394 	SCTP_BUF_LEN(m_notify) = 0;
3395 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3396 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3397 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3398 	stradd->strchange_flags = flag;
3399 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3400 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3401 	stradd->strchange_instrms = numberin;
3402 	stradd->strchange_outstrms = numberout;
3403 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3404 	SCTP_BUF_NEXT(m_notify) = NULL;
3405 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3406 		/* no space */
3407 		sctp_m_freem(m_notify);
3408 		return;
3409 	}
3410 	/* append to socket */
3411 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3412 	    0, 0, stcb->asoc.context, 0, 0, 0,
3413 	    m_notify);
3414 	if (control == NULL) {
3415 		/* no memory */
3416 		sctp_m_freem(m_notify);
3417 		return;
3418 	}
3419 	control->spec_flags = M_NOTIFICATION;
3420 	control->length = SCTP_BUF_LEN(m_notify);
3421 	/* not that we need this */
3422 	control->tail_mbuf = m_notify;
3423 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3424 	    control,
3425 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3426 }
3427 
3428 void
3429 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3430 {
3431 	struct mbuf *m_notify;
3432 	struct sctp_queued_to_read *control;
3433 	struct sctp_assoc_reset_event *strasoc;
3434 
3435 	if ((stcb == NULL) ||
3436 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3437 		/* event not enabled */
3438 		return;
3439 	}
3440 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3441 	if (m_notify == NULL)
3442 		/* no space left */
3443 		return;
3444 	SCTP_BUF_LEN(m_notify) = 0;
3445 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3446 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3447 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3448 	strasoc->assocreset_flags = flag;
3449 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3450 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3451 	strasoc->assocreset_local_tsn = sending_tsn;
3452 	strasoc->assocreset_remote_tsn = recv_tsn;
3453 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3454 	SCTP_BUF_NEXT(m_notify) = NULL;
3455 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3456 		/* no space */
3457 		sctp_m_freem(m_notify);
3458 		return;
3459 	}
3460 	/* append to socket */
3461 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3462 	    0, 0, stcb->asoc.context, 0, 0, 0,
3463 	    m_notify);
3464 	if (control == NULL) {
3465 		/* no memory */
3466 		sctp_m_freem(m_notify);
3467 		return;
3468 	}
3469 	control->spec_flags = M_NOTIFICATION;
3470 	control->length = SCTP_BUF_LEN(m_notify);
3471 	/* not that we need this */
3472 	control->tail_mbuf = m_notify;
3473 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3474 	    control,
3475 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3476 }
3477 
3478 
3479 
3480 static void
3481 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3482     int number_entries, uint16_t * list, int flag)
3483 {
3484 	struct mbuf *m_notify;
3485 	struct sctp_queued_to_read *control;
3486 	struct sctp_stream_reset_event *strreset;
3487 	int len;
3488 
3489 	if ((stcb == NULL) ||
3490 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3491 		/* event not enabled */
3492 		return;
3493 	}
3494 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3495 	if (m_notify == NULL)
3496 		/* no space left */
3497 		return;
3498 	SCTP_BUF_LEN(m_notify) = 0;
3499 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3500 	if (len > M_TRAILINGSPACE(m_notify)) {
3501 		/* never enough room */
3502 		sctp_m_freem(m_notify);
3503 		return;
3504 	}
3505 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3506 	memset(strreset, 0, len);
3507 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3508 	strreset->strreset_flags = flag;
3509 	strreset->strreset_length = len;
3510 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3511 	if (number_entries) {
3512 		int i;
3513 
3514 		for (i = 0; i < number_entries; i++) {
3515 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3516 		}
3517 	}
3518 	SCTP_BUF_LEN(m_notify) = len;
3519 	SCTP_BUF_NEXT(m_notify) = NULL;
3520 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3521 		/* no space */
3522 		sctp_m_freem(m_notify);
3523 		return;
3524 	}
3525 	/* append to socket */
3526 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3527 	    0, 0, stcb->asoc.context, 0, 0, 0,
3528 	    m_notify);
3529 	if (control == NULL) {
3530 		/* no memory */
3531 		sctp_m_freem(m_notify);
3532 		return;
3533 	}
3534 	control->spec_flags = M_NOTIFICATION;
3535 	control->length = SCTP_BUF_LEN(m_notify);
3536 	/* not that we need this */
3537 	control->tail_mbuf = m_notify;
3538 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3539 	    control,
3540 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3541 }
3542 
3543 
3544 static void
3545 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3546 {
3547 	struct mbuf *m_notify;
3548 	struct sctp_remote_error *sre;
3549 	struct sctp_queued_to_read *control;
3550 	size_t notif_len, chunk_len;
3551 
3552 	if ((stcb == NULL) ||
3553 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3554 		return;
3555 	}
3556 	if (chunk != NULL) {
3557 		chunk_len = ntohs(chunk->ch.chunk_length);
3558 	} else {
3559 		chunk_len = 0;
3560 	}
3561 	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
3562 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3563 	if (m_notify == NULL) {
3564 		/* Retry with smaller value. */
3565 		notif_len = sizeof(struct sctp_remote_error);
3566 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3567 		if (m_notify == NULL) {
3568 			return;
3569 		}
3570 	}
3571 	SCTP_BUF_NEXT(m_notify) = NULL;
3572 	sre = mtod(m_notify, struct sctp_remote_error *);
3573 	memset(sre, 0, notif_len);
3574 	sre->sre_type = SCTP_REMOTE_ERROR;
3575 	sre->sre_flags = 0;
3576 	sre->sre_length = sizeof(struct sctp_remote_error);
3577 	sre->sre_error = error;
3578 	sre->sre_assoc_id = sctp_get_associd(stcb);
3579 	if (notif_len > sizeof(struct sctp_remote_error)) {
3580 		memcpy(sre->sre_data, chunk, chunk_len);
3581 		sre->sre_length += chunk_len;
3582 	}
3583 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3584 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3585 	    0, 0, stcb->asoc.context, 0, 0, 0,
3586 	    m_notify);
3587 	if (control != NULL) {
3588 		control->length = SCTP_BUF_LEN(m_notify);
3589 		/* not that we need this */
3590 		control->tail_mbuf = m_notify;
3591 		control->spec_flags = M_NOTIFICATION;
3592 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3593 		    control,
3594 		    &stcb->sctp_socket->so_rcv, 1,
3595 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3596 	} else {
3597 		sctp_m_freem(m_notify);
3598 	}
3599 }
3600 
3601 
3602 void
3603 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3604     uint32_t error, void *data, int so_locked
3605 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3606     SCTP_UNUSED
3607 #endif
3608 )
3609 {
3610 	if ((stcb == NULL) ||
3611 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3612 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3613 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3614 		/* If the socket is gone we are out of here */
3615 		return;
3616 	}
3617 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3618 		return;
3619 	}
3620 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3621 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3622 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3623 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3624 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3625 			/* Don't report these in front states */
3626 			return;
3627 		}
3628 	}
3629 	switch (notification) {
3630 	case SCTP_NOTIFY_ASSOC_UP:
3631 		if (stcb->asoc.assoc_up_sent == 0) {
3632 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3633 			stcb->asoc.assoc_up_sent = 1;
3634 		}
3635 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3636 			sctp_notify_adaptation_layer(stcb);
3637 		}
3638 		if (stcb->asoc.auth_supported == 0) {
3639 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3640 			    NULL, so_locked);
3641 		}
3642 		break;
3643 	case SCTP_NOTIFY_ASSOC_DOWN:
3644 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3645 		break;
3646 	case SCTP_NOTIFY_INTERFACE_DOWN:
3647 		{
3648 			struct sctp_nets *net;
3649 
3650 			net = (struct sctp_nets *)data;
3651 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3652 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3653 			break;
3654 		}
3655 	case SCTP_NOTIFY_INTERFACE_UP:
3656 		{
3657 			struct sctp_nets *net;
3658 
3659 			net = (struct sctp_nets *)data;
3660 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3661 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3662 			break;
3663 		}
3664 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3665 		{
3666 			struct sctp_nets *net;
3667 
3668 			net = (struct sctp_nets *)data;
3669 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3670 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3671 			break;
3672 		}
3673 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3674 		sctp_notify_send_failed2(stcb, error,
3675 		    (struct sctp_stream_queue_pending *)data, so_locked);
3676 		break;
3677 	case SCTP_NOTIFY_SENT_DG_FAIL:
3678 		sctp_notify_send_failed(stcb, 1, error,
3679 		    (struct sctp_tmit_chunk *)data, so_locked);
3680 		break;
3681 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3682 		sctp_notify_send_failed(stcb, 0, error,
3683 		    (struct sctp_tmit_chunk *)data, so_locked);
3684 		break;
3685 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3686 		{
3687 			uint32_t val;
3688 
3689 			val = *((uint32_t *) data);
3690 
3691 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3692 			break;
3693 		}
3694 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3695 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3696 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3697 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3698 		} else {
3699 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3700 		}
3701 		break;
3702 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3703 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3704 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3705 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3706 		} else {
3707 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3708 		}
3709 		break;
3710 	case SCTP_NOTIFY_ASSOC_RESTART:
3711 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3712 		if (stcb->asoc.auth_supported == 0) {
3713 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3714 			    NULL, so_locked);
3715 		}
3716 		break;
3717 	case SCTP_NOTIFY_STR_RESET_SEND:
3718 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3719 		break;
3720 	case SCTP_NOTIFY_STR_RESET_RECV:
3721 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3722 		break;
3723 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3724 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3725 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3726 		break;
3727 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3728 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3729 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3730 		break;
3731 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3732 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3733 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3734 		break;
3735 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3736 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3737 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3738 		break;
3739 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3740 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3741 		    error, so_locked);
3742 		break;
3743 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3744 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3745 		    error, so_locked);
3746 		break;
3747 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3748 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3749 		    error, so_locked);
3750 		break;
3751 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3752 		sctp_notify_shutdown_event(stcb);
3753 		break;
3754 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3755 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3756 		    (uint16_t) (uintptr_t) data,
3757 		    so_locked);
3758 		break;
3759 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3760 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3761 		    (uint16_t) (uintptr_t) data,
3762 		    so_locked);
3763 		break;
3764 	case SCTP_NOTIFY_NO_PEER_AUTH:
3765 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3766 		    (uint16_t) (uintptr_t) data,
3767 		    so_locked);
3768 		break;
3769 	case SCTP_NOTIFY_SENDER_DRY:
3770 		sctp_notify_sender_dry_event(stcb, so_locked);
3771 		break;
3772 	case SCTP_NOTIFY_REMOTE_ERROR:
3773 		sctp_notify_remote_error(stcb, error, data);
3774 		break;
3775 	default:
3776 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3777 		    __FUNCTION__, notification, notification);
3778 		break;
3779 	}			/* end switch */
3780 }
3781 
/*
 * Abort-time cleanup: walk the sent queue, the send queue, and every
 * stream output queue, notifying the user of each failed message and
 * freeing the associated chunks and data.  Takes the TCB send lock
 * unless the caller already holds it (holds_lock != 0).
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		/* nobody left to notify */
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/*
			 * Keep the per-stream chunk count consistent; with
			 * INVARIANTS an underflow is a panic, otherwise the
			 * decrement is simply skipped.
			 */
			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* tell the user this already-sent datagram failed */
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* the notify may have stolen chk->data; re-check */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* tell the user this never-sent datagram failed */
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				/* report the still-queued message as failed */
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* the notify may have stolen sp->data; re-check */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3893 
3894 void
3895 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3896     struct sctp_abort_chunk *abort, int so_locked
3897 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3898     SCTP_UNUSED
3899 #endif
3900 )
3901 {
3902 	if (stcb == NULL) {
3903 		return;
3904 	}
3905 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3906 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3907 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3908 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3909 	}
3910 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3911 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3912 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3913 		return;
3914 	}
3915 	/* Tell them we lost the asoc */
3916 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3917 	if (from_peer) {
3918 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3919 	} else {
3920 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3921 	}
3922 }
3923 
/*
 * Abort an association in response to an inbound packet: notify the ULP
 * (when a TCB exists), send an ABORT back to the packet's source, and
 * free the TCB.  When stcb is NULL only the ABORT is sent, using vtag 0.
 * The caller's TCB lock (if any) is consumed; the TCB does not survive
 * this call.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Socket lock must be taken before the TCB lock; hold a
		 * refcount so the TCB cannot disappear while unlocked.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* An established association is going away; drop the gauge. */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3973 
3974 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper (built only with SCTP_ASOCLOG_OF_TSNS): dump the circular
 * inbound and outbound TSN logs of an association.  The whole body is
 * additionally gated on NOSIY_PRINTS.
 *
 * NOTE(review): "NOSIY_PRINTS" looks like a typo of "NOISY_PRINTS" --
 * confirm which spelling is actually defined before expecting output;
 * with neither defined this function is a no-op.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/*
	 * The log is circular: when wrapped, entries from tsn_in_at to the
	 * end are the oldest and are printed first.
	 */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* Same wrapped-first ordering for the outbound log. */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
4035 
4036 #endif
4037 
/*
 * Abort an existing association: notify the ULP (unless the socket is
 * already gone), send an ABORT chunk to the peer, and free the TCB.
 * If stcb is NULL there is nothing to abort; we only garbage-collect an
 * inp whose socket has been closed and that has no associations left.
 * op_err (may be NULL per sctp_send_abort_tcb's usage elsewhere --
 * TODO confirm) is carried in the ABORT as an error cause.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* An established association is going away; drop the gauge. */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Socket lock before TCB lock; hold a refcount so the TCB cannot
	 * vanish while it is unlocked.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4099 
4100 void
4101 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4102     struct sockaddr *src, struct sockaddr *dst,
4103     struct sctphdr *sh, struct sctp_inpcb *inp,
4104     struct mbuf *cause,
4105     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4106     uint32_t vrf_id, uint16_t port)
4107 {
4108 	struct sctp_chunkhdr *ch, chunk_buf;
4109 	unsigned int chk_length;
4110 	int contains_init_chunk;
4111 
4112 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4113 	/* Generate a TO address for future reference */
4114 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4115 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4116 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4117 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4118 		}
4119 	}
4120 	contains_init_chunk = 0;
4121 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4122 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4123 	while (ch != NULL) {
4124 		chk_length = ntohs(ch->chunk_length);
4125 		if (chk_length < sizeof(*ch)) {
4126 			/* break to abort land */
4127 			break;
4128 		}
4129 		switch (ch->chunk_type) {
4130 		case SCTP_INIT:
4131 			contains_init_chunk = 1;
4132 			break;
4133 		case SCTP_PACKET_DROPPED:
4134 			/* we don't respond to pkt-dropped */
4135 			return;
4136 		case SCTP_ABORT_ASSOCIATION:
4137 			/* we don't respond with an ABORT to an ABORT */
4138 			return;
4139 		case SCTP_SHUTDOWN_COMPLETE:
4140 			/*
4141 			 * we ignore it since we are not waiting for it and
4142 			 * peer is gone
4143 			 */
4144 			return;
4145 		case SCTP_SHUTDOWN_ACK:
4146 			sctp_send_shutdown_complete2(src, dst, sh,
4147 			    mflowtype, mflowid, fibnum,
4148 			    vrf_id, port);
4149 			return;
4150 		default:
4151 			break;
4152 		}
4153 		offset += SCTP_SIZE32(chk_length);
4154 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4155 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4156 	}
4157 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4158 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4159 	    (contains_init_chunk == 0))) {
4160 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4161 		    mflowtype, mflowid, fibnum,
4162 		    vrf_id, port);
4163 	}
4164 }
4165 
4166 /*
4167  * check the inbound datagram to make sure there is not an abort inside it,
4168  * if there is return 1, else return 0.
4169  */
4170 int
4171 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4172 {
4173 	struct sctp_chunkhdr *ch;
4174 	struct sctp_init_chunk *init_chk, chunk_buf;
4175 	int offset;
4176 	unsigned int chk_length;
4177 
4178 	offset = iphlen + sizeof(struct sctphdr);
4179 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4180 	    (uint8_t *) & chunk_buf);
4181 	while (ch != NULL) {
4182 		chk_length = ntohs(ch->chunk_length);
4183 		if (chk_length < sizeof(*ch)) {
4184 			/* packet is probably corrupt */
4185 			break;
4186 		}
4187 		/* we seem to be ok, is it an abort? */
4188 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4189 			/* yep, tell them */
4190 			return (1);
4191 		}
4192 		if (ch->chunk_type == SCTP_INITIATION) {
4193 			/* need to update the Vtag */
4194 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4195 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4196 			if (init_chk != NULL) {
4197 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4198 			}
4199 		}
4200 		/* Nope, move to the next chunk */
4201 		offset += SCTP_SIZE32(chk_length);
4202 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4203 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4204 	}
4205 	return (0);
4206 }
4207 
4208 /*
4209  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4210  * set (i.e. it's 0) so, create this function to compare link local scopes
4211  */
4212 #ifdef INET6
4213 uint32_t
4214 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4215 {
4216 	struct sockaddr_in6 a, b;
4217 
4218 	/* save copies */
4219 	a = *addr1;
4220 	b = *addr2;
4221 
4222 	if (a.sin6_scope_id == 0)
4223 		if (sa6_recoverscope(&a)) {
4224 			/* can't get scope, so can't match */
4225 			return (0);
4226 		}
4227 	if (b.sin6_scope_id == 0)
4228 		if (sa6_recoverscope(&b)) {
4229 			/* can't get scope, so can't match */
4230 			return (0);
4231 		}
4232 	if (a.sin6_scope_id != b.sin6_scope_id)
4233 		return (0);
4234 
4235 	return (1);
4236 }
4237 
4238 /*
4239  * returns a sockaddr_in6 with embedded scope recovered and removed
4240  */
/*
 * returns a sockaddr_in6 with embedded scope recovered and removed
 *
 * store is caller-provided scratch space: when the address is link-local
 * with no sin6_scope_id, a copy is placed there, the scope recovered into
 * it, and the copy returned.  Otherwise the original addr is returned --
 * note that in the scope_id != 0 case the embedded scope is cleared in
 * the CALLER's addr in place (in6_clearscope modifies it directly).
 */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				*store = *addr;
				if (!sa6_recoverscope(store)) {
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/* else, return the original "to" addr */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}
4261 
4262 #endif
4263 
4264 /*
4265  * are the two addresses the same?  currently a "scopeless" check returns: 1
4266  * if same, 0 if not
4267  */
/*
 * Scopeless address equality check: returns 1 when sa1 and sa2 carry the
 * same address, 0 otherwise (including NULL pointers, mismatched
 * families, or an unsupported family).
 */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{
	/* both pointers must be valid */
	if ((sa1 == NULL) || (sa2 == NULL)) {
		return (0);
	}
	/* different families never match */
	if (sa1->sa_family != sa2->sa_family) {
		return (0);
	}
	switch (sa1->sa_family) {
#ifdef INET6
	case AF_INET6:
		/* IPv6: full 128-bit comparison */
		return (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)sa1,
		    (struct sockaddr_in6 *)sa2));
#endif
#ifdef INET
	case AF_INET:
		/* IPv4: compare the 32-bit addresses directly */
		return (((struct sockaddr_in *)sa1)->sin_addr.s_addr ==
		    ((struct sockaddr_in *)sa2)->sin_addr.s_addr);
#endif
	default:
		/* we don't do these... */
		return (0);
	}
}
4309 
4310 void
4311 sctp_print_address(struct sockaddr *sa)
4312 {
4313 #ifdef INET6
4314 	char ip6buf[INET6_ADDRSTRLEN];
4315 
4316 #endif
4317 
4318 	switch (sa->sa_family) {
4319 #ifdef INET6
4320 	case AF_INET6:
4321 		{
4322 			struct sockaddr_in6 *sin6;
4323 
4324 			sin6 = (struct sockaddr_in6 *)sa;
4325 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4326 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4327 			    ntohs(sin6->sin6_port),
4328 			    sin6->sin6_scope_id);
4329 			break;
4330 		}
4331 #endif
4332 #ifdef INET
4333 	case AF_INET:
4334 		{
4335 			struct sockaddr_in *sin;
4336 			unsigned char *p;
4337 
4338 			sin = (struct sockaddr_in *)sa;
4339 			p = (unsigned char *)&sin->sin_addr;
4340 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4341 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4342 			break;
4343 		}
4344 #endif
4345 	default:
4346 		SCTP_PRINTF("?\n");
4347 		break;
4348 	}
4349 }
4350 
/*
 * Migrate all queued-to-read control structures belonging to stcb from
 * old_inp's read queue to new_inp's (used by peeloff/accept).  The socket
 * buffer accounting is moved as well: bytes are freed from the old
 * socket's receive buffer and charged to the new one.  waitflags is
 * passed through to sblock(); on sblock failure the data is left in
 * place untouched.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off everything destined for our target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* un-charge each mbuf from the old receive buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge each mbuf to the new receive buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4426 
/*
 * Append a fully-built control structure to the endpoint's read queue,
 * charging its mbuf chain to the socket buffer sb so that select/poll
 * work.  Zero-length mbufs are pruned from the chain first; if the whole
 * chain collapses away the control is freed instead of queued.  When end
 * is set the message is marked complete.  inp_read_lock_held tells us
 * whether the caller already holds the INP read lock; after queueing the
 * receiving socket is woken up (or a zero-copy event is posted).
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Reader is gone: drop the data instead of queueing it. */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* Notifications are not counted as user-visible receives. */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		/* charge this mbuf to the socket buffer */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* wake up any reader */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/* socket lock before TCB lock; hold a ref */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4552 
4553 
/*
 * Append mbuf chain m to an existing control structure on the read queue
 * (partial-delivery API or reassembly).  Zero-length mbufs are pruned;
 * when sb is non-NULL the appended bytes are charged to that socket
 * buffer.  end marks the message complete; ctls_cumack records the
 * highest TSN delivered so far in this message.  Returns 0 on success,
 * -1 when there is nothing valid to append (control NULL, already
 * complete, or empty chain).  The reader is woken up on success.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		/* charge this mbuf to the socket buffer if one was given */
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* wake up any reader */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4703 
4704 
4705 
4706 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4707  *************ALTERNATE ROUTING CODE
4708  */
4709 
4710 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4711  *************ALTERNATE ROUTING CODE
4712  */
4713 
4714 struct mbuf *
4715 sctp_generate_cause(uint16_t code, char *info)
4716 {
4717 	struct mbuf *m;
4718 	struct sctp_gen_error_cause *cause;
4719 	size_t info_len, len;
4720 
4721 	if ((code == 0) || (info == NULL)) {
4722 		return (NULL);
4723 	}
4724 	info_len = strlen(info);
4725 	len = sizeof(struct sctp_paramhdr) + info_len;
4726 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4727 	if (m != NULL) {
4728 		SCTP_BUF_LEN(m) = len;
4729 		cause = mtod(m, struct sctp_gen_error_cause *);
4730 		cause->code = htons(code);
4731 		cause->length = htons((uint16_t) len);
4732 		memcpy(cause->info, info, info_len);
4733 	}
4734 	return (m);
4735 }
4736 
4737 struct mbuf *
4738 sctp_generate_no_user_data_cause(uint32_t tsn)
4739 {
4740 	struct mbuf *m;
4741 	struct sctp_error_no_user_data *no_user_data_cause;
4742 	size_t len;
4743 
4744 	len = sizeof(struct sctp_error_no_user_data);
4745 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4746 	if (m != NULL) {
4747 		SCTP_BUF_LEN(m) = len;
4748 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4749 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4750 		no_user_data_cause->cause.length = htons((uint16_t) len);
4751 		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4752 	}
4753 	return (m);
4754 }
4755 
4756 #ifdef SCTP_MBCNT_LOGGING
/*
 * Logging variant (built with SCTP_MBCNT_LOGGING) of the outbound buffer
 * accounting release: subtract chunk tp1's book_size from the
 * association's total output queue size and, for 1-to-1 style sockets,
 * from the socket send buffer count as well.  chk_cnt chunks are removed
 * from chunks_on_out_queue.  No-op when the chunk carries no data.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* Clamp at zero rather than letting the count go negative/wrap. */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* Mirror the accounting in the socket send buffer (1-to-1 style). */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4788 
4789 #endif
4790 
4791 int
4792 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4793     uint8_t sent, int so_locked
4794 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4795     SCTP_UNUSED
4796 #endif
4797 )
4798 {
4799 	struct sctp_stream_out *strq;
4800 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4801 	struct sctp_stream_queue_pending *sp;
4802 	uint16_t stream = 0, seq = 0;
4803 	uint8_t foundeom = 0;
4804 	int ret_sz = 0;
4805 	int notdone;
4806 	int do_wakeup_routine = 0;
4807 
4808 	stream = tp1->rec.data.stream_number;
4809 	seq = tp1->rec.data.stream_seq;
4810 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4811 		stcb->asoc.abandoned_sent[0]++;
4812 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4813 		stcb->asoc.strmout[stream].abandoned_sent[0]++;
4814 #if defined(SCTP_DETAILED_STR_STATS)
4815 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4816 #endif
4817 	} else {
4818 		stcb->asoc.abandoned_unsent[0]++;
4819 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4820 		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
4821 #if defined(SCTP_DETAILED_STR_STATS)
4822 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4823 #endif
4824 	}
4825 	do {
4826 		ret_sz += tp1->book_size;
4827 		if (tp1->data != NULL) {
4828 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4829 				sctp_flight_size_decrease(tp1);
4830 				sctp_total_flight_decrease(stcb, tp1);
4831 			}
4832 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4833 			stcb->asoc.peers_rwnd += tp1->send_size;
4834 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4835 			if (sent) {
4836 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4837 			} else {
4838 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4839 			}
4840 			if (tp1->data) {
4841 				sctp_m_freem(tp1->data);
4842 				tp1->data = NULL;
4843 			}
4844 			do_wakeup_routine = 1;
4845 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4846 				stcb->asoc.sent_queue_cnt_removeable--;
4847 			}
4848 		}
4849 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4850 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4851 		    SCTP_DATA_NOT_FRAG) {
4852 			/* not frag'ed we ae done   */
4853 			notdone = 0;
4854 			foundeom = 1;
4855 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4856 			/* end of frag, we are done */
4857 			notdone = 0;
4858 			foundeom = 1;
4859 		} else {
4860 			/*
4861 			 * Its a begin or middle piece, we must mark all of
4862 			 * it
4863 			 */
4864 			notdone = 1;
4865 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4866 		}
4867 	} while (tp1 && notdone);
4868 	if (foundeom == 0) {
4869 		/*
4870 		 * The multi-part message was scattered across the send and
4871 		 * sent queue.
4872 		 */
4873 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4874 			if ((tp1->rec.data.stream_number != stream) ||
4875 			    (tp1->rec.data.stream_seq != seq)) {
4876 				break;
4877 			}
4878 			/*
4879 			 * save to chk in case we have some on stream out
4880 			 * queue. If so and we have an un-transmitted one we
4881 			 * don't have to fudge the TSN.
4882 			 */
4883 			chk = tp1;
4884 			ret_sz += tp1->book_size;
4885 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4886 			if (sent) {
4887 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4888 			} else {
4889 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4890 			}
4891 			if (tp1->data) {
4892 				sctp_m_freem(tp1->data);
4893 				tp1->data = NULL;
4894 			}
4895 			/* No flight involved here book the size to 0 */
4896 			tp1->book_size = 0;
4897 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4898 				foundeom = 1;
4899 			}
4900 			do_wakeup_routine = 1;
4901 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4902 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4903 			/*
4904 			 * on to the sent queue so we can wait for it to be
4905 			 * passed by.
4906 			 */
4907 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4908 			    sctp_next);
4909 			stcb->asoc.send_queue_cnt--;
4910 			stcb->asoc.sent_queue_cnt++;
4911 		}
4912 	}
4913 	if (foundeom == 0) {
4914 		/*
4915 		 * Still no eom found. That means there is stuff left on the
4916 		 * stream out queue.. yuck.
4917 		 */
4918 		SCTP_TCB_SEND_LOCK(stcb);
4919 		strq = &stcb->asoc.strmout[stream];
4920 		sp = TAILQ_FIRST(&strq->outqueue);
4921 		if (sp != NULL) {
4922 			sp->discard_rest = 1;
4923 			/*
4924 			 * We may need to put a chunk on the queue that
4925 			 * holds the TSN that would have been sent with the
4926 			 * LAST bit.
4927 			 */
4928 			if (chk == NULL) {
4929 				/* Yep, we have to */
4930 				sctp_alloc_a_chunk(stcb, chk);
4931 				if (chk == NULL) {
4932 					/*
4933 					 * we are hosed. All we can do is
4934 					 * nothing.. which will cause an
4935 					 * abort if the peer is paying
4936 					 * attention.
4937 					 */
4938 					goto oh_well;
4939 				}
4940 				memset(chk, 0, sizeof(*chk));
4941 				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4942 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4943 				chk->asoc = &stcb->asoc;
4944 				chk->rec.data.stream_seq = strq->next_sequence_send;
4945 				chk->rec.data.stream_number = sp->stream;
4946 				chk->rec.data.payloadtype = sp->ppid;
4947 				chk->rec.data.context = sp->context;
4948 				chk->flags = sp->act_flags;
4949 				chk->whoTo = NULL;
4950 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4951 				strq->chunks_on_queues++;
4952 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4953 				stcb->asoc.sent_queue_cnt++;
4954 				stcb->asoc.pr_sctp_cnt++;
4955 			} else {
4956 				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4957 			}
4958 			strq->next_sequence_send++;
4959 	oh_well:
4960 			if (sp->data) {
4961 				/*
4962 				 * Pull any data to free up the SB and allow
4963 				 * sender to "add more" while we will throw
4964 				 * away :-)
4965 				 */
4966 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4967 				ret_sz += sp->length;
4968 				do_wakeup_routine = 1;
4969 				sp->some_taken = 1;
4970 				sctp_m_freem(sp->data);
4971 				sp->data = NULL;
4972 				sp->tail_mbuf = NULL;
4973 				sp->length = 0;
4974 			}
4975 		}
4976 		SCTP_TCB_SEND_UNLOCK(stcb);
4977 	}
4978 	if (do_wakeup_routine) {
4979 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4980 		struct socket *so;
4981 
4982 		so = SCTP_INP_SO(stcb->sctp_ep);
4983 		if (!so_locked) {
4984 			atomic_add_int(&stcb->asoc.refcnt, 1);
4985 			SCTP_TCB_UNLOCK(stcb);
4986 			SCTP_SOCKET_LOCK(so, 1);
4987 			SCTP_TCB_LOCK(stcb);
4988 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4989 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4990 				/* assoc was freed while we were unlocked */
4991 				SCTP_SOCKET_UNLOCK(so, 1);
4992 				return (ret_sz);
4993 			}
4994 		}
4995 #endif
4996 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4997 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4998 		if (!so_locked) {
4999 			SCTP_SOCKET_UNLOCK(so, 1);
5000 		}
5001 #endif
5002 	}
5003 	return (ret_sz);
5004 }
5005 
5006 /*
5007  * checks to see if the given address, sa, is one that is currently known by
5008  * the kernel note: can't distinguish the same address on multiple interfaces
5009  * and doesn't handle multiple addresses with different zone/scope id's note:
5010  * ifa_ifwithaddr() compares the entire sockaddr struct
5011  */
5012 struct sctp_ifa *
5013 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5014     int holds_lock)
5015 {
5016 	struct sctp_laddr *laddr;
5017 
5018 	if (holds_lock == 0) {
5019 		SCTP_INP_RLOCK(inp);
5020 	}
5021 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5022 		if (laddr->ifa == NULL)
5023 			continue;
5024 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5025 			continue;
5026 #ifdef INET
5027 		if (addr->sa_family == AF_INET) {
5028 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5029 			    laddr->ifa->address.sin.sin_addr.s_addr) {
5030 				/* found him. */
5031 				if (holds_lock == 0) {
5032 					SCTP_INP_RUNLOCK(inp);
5033 				}
5034 				return (laddr->ifa);
5035 				break;
5036 			}
5037 		}
5038 #endif
5039 #ifdef INET6
5040 		if (addr->sa_family == AF_INET6) {
5041 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5042 			    &laddr->ifa->address.sin6)) {
5043 				/* found him. */
5044 				if (holds_lock == 0) {
5045 					SCTP_INP_RUNLOCK(inp);
5046 				}
5047 				return (laddr->ifa);
5048 				break;
5049 			}
5050 		}
5051 #endif
5052 	}
5053 	if (holds_lock == 0) {
5054 		SCTP_INP_RUNLOCK(inp);
5055 	}
5056 	return (NULL);
5057 }
5058 
/*
 * Compute the hash bucket value for an address: the host address words
 * are summed (IPv6) or used directly (IPv4) and folded with their upper
 * 16 bits. Unknown families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin4;
			uint32_t v4;

			sin4 = (struct sockaddr_in *)addr;
			v4 = sin4->sin_addr.s_addr;
			/* Fold the top half into the bottom half. */
			return (v4 ^ (v4 >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t sum;

			sin6 = (struct sockaddr_in6 *)addr;
			/* Sum all four 32-bit words of the address. */
			sum = sin6->sin6_addr.s6_addr32[0];
			sum += sin6->sin6_addr.s6_addr32[1];
			sum += sin6->sin6_addr.s6_addr32[2];
			sum += sin6->sin6_addr.s6_addr32[3];
			/* Fold the top half into the bottom half. */
			return (sum ^ (sum >> 16));
		}
#endif
	default:
		break;
	}
	return (0);
}
5092 
5093 struct sctp_ifa *
5094 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5095 {
5096 	struct sctp_ifa *sctp_ifap;
5097 	struct sctp_vrf *vrf;
5098 	struct sctp_ifalist *hash_head;
5099 	uint32_t hash_of_addr;
5100 
5101 	if (holds_lock == 0)
5102 		SCTP_IPI_ADDR_RLOCK();
5103 
5104 	vrf = sctp_find_vrf(vrf_id);
5105 	if (vrf == NULL) {
5106 		if (holds_lock == 0)
5107 			SCTP_IPI_ADDR_RUNLOCK();
5108 		return (NULL);
5109 	}
5110 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5111 
5112 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5113 	if (hash_head == NULL) {
5114 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5115 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5116 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5117 		sctp_print_address(addr);
5118 		SCTP_PRINTF("No such bucket for address\n");
5119 		if (holds_lock == 0)
5120 			SCTP_IPI_ADDR_RUNLOCK();
5121 
5122 		return (NULL);
5123 	}
5124 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5125 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5126 			continue;
5127 #ifdef INET
5128 		if (addr->sa_family == AF_INET) {
5129 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5130 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5131 				/* found him. */
5132 				if (holds_lock == 0)
5133 					SCTP_IPI_ADDR_RUNLOCK();
5134 				return (sctp_ifap);
5135 				break;
5136 			}
5137 		}
5138 #endif
5139 #ifdef INET6
5140 		if (addr->sa_family == AF_INET6) {
5141 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5142 			    &sctp_ifap->address.sin6)) {
5143 				/* found him. */
5144 				if (holds_lock == 0)
5145 					SCTP_IPI_ADDR_RUNLOCK();
5146 				return (sctp_ifap);
5147 				break;
5148 			}
5149 		}
5150 #endif
5151 	}
5152 	if (holds_lock == 0)
5153 		SCTP_IPI_ADDR_RUNLOCK();
5154 	return (NULL);
5155 }
5156 
/*
 * Called after the user has pulled *freed_so_far bytes off the socket
 * buffer. Decides whether enough receive window has opened up (at least
 * rwnd_req bytes versus the last rwnd we reported to the peer) to make it
 * worth sending a window-update SACK right away. hold_rlock indicates the
 * caller holds the INP read lock, which must be dropped before taking the
 * TCB lock and re-taken before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the assoc so it cannot be freed while we work unlocked. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Also pin the endpoint for the same reason. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Account the newly-freed bytes and reset the caller's counter. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		/* dif = how much the window has grown since we last told the peer. */
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window grew enough: send a window-update SACK now. */
		if (hold_rlock) {
			/* Drop the INP read lock before taking the TCB lock. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-take the INP read lock if the caller held it and we dropped it. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Release the assoc reference taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5239 
5240 int
5241 sctp_sorecvmsg(struct socket *so,
5242     struct uio *uio,
5243     struct mbuf **mp,
5244     struct sockaddr *from,
5245     int fromlen,
5246     int *msg_flags,
5247     struct sctp_sndrcvinfo *sinfo,
5248     int filling_sinfo)
5249 {
5250 	/*
5251 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5252 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5253 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5254 	 * On the way out we may send out any combination of:
5255 	 * MSG_NOTIFICATION MSG_EOR
5256 	 *
5257 	 */
5258 	struct sctp_inpcb *inp = NULL;
5259 	int my_len = 0;
5260 	int cp_len = 0, error = 0;
5261 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5262 	struct mbuf *m = NULL;
5263 	struct sctp_tcb *stcb = NULL;
5264 	int wakeup_read_socket = 0;
5265 	int freecnt_applied = 0;
5266 	int out_flags = 0, in_flags = 0;
5267 	int block_allowed = 1;
5268 	uint32_t freed_so_far = 0;
5269 	uint32_t copied_so_far = 0;
5270 	int in_eeor_mode = 0;
5271 	int no_rcv_needed = 0;
5272 	uint32_t rwnd_req = 0;
5273 	int hold_sblock = 0;
5274 	int hold_rlock = 0;
5275 	int slen = 0;
5276 	uint32_t held_length = 0;
5277 	int sockbuf_lock = 0;
5278 
5279 	if (uio == NULL) {
5280 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5281 		return (EINVAL);
5282 	}
5283 	if (msg_flags) {
5284 		in_flags = *msg_flags;
5285 		if (in_flags & MSG_PEEK)
5286 			SCTP_STAT_INCR(sctps_read_peeks);
5287 	} else {
5288 		in_flags = 0;
5289 	}
5290 	slen = uio->uio_resid;
5291 
5292 	/* Pull in and set up our int flags */
5293 	if (in_flags & MSG_OOB) {
5294 		/* Out of band's NOT supported */
5295 		return (EOPNOTSUPP);
5296 	}
5297 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5298 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5299 		return (EINVAL);
5300 	}
5301 	if ((in_flags & (MSG_DONTWAIT
5302 	    | MSG_NBIO
5303 	    )) ||
5304 	    SCTP_SO_IS_NBIO(so)) {
5305 		block_allowed = 0;
5306 	}
5307 	/* setup the endpoint */
5308 	inp = (struct sctp_inpcb *)so->so_pcb;
5309 	if (inp == NULL) {
5310 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5311 		return (EFAULT);
5312 	}
5313 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5314 	/* Must be at least a MTU's worth */
5315 	if (rwnd_req < SCTP_MIN_RWND)
5316 		rwnd_req = SCTP_MIN_RWND;
5317 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5318 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5319 		sctp_misc_ints(SCTP_SORECV_ENTER,
5320 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5321 	}
5322 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5323 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5324 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5325 	}
5326 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5327 	if (error) {
5328 		goto release_unlocked;
5329 	}
5330 	sockbuf_lock = 1;
5331 restart:
5332 
5333 
5334 restart_nosblocks:
5335 	if (hold_sblock == 0) {
5336 		SOCKBUF_LOCK(&so->so_rcv);
5337 		hold_sblock = 1;
5338 	}
5339 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5340 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5341 		goto out;
5342 	}
5343 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5344 		if (so->so_error) {
5345 			error = so->so_error;
5346 			if ((in_flags & MSG_PEEK) == 0)
5347 				so->so_error = 0;
5348 			goto out;
5349 		} else {
5350 			if (so->so_rcv.sb_cc == 0) {
5351 				/* indicate EOF */
5352 				error = 0;
5353 				goto out;
5354 			}
5355 		}
5356 	}
5357 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5358 		/* we need to wait for data */
5359 		if ((so->so_rcv.sb_cc == 0) &&
5360 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5361 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5362 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5363 				/*
5364 				 * For active open side clear flags for
5365 				 * re-use passive open is blocked by
5366 				 * connect.
5367 				 */
5368 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5369 					/*
5370 					 * You were aborted, passive side
5371 					 * always hits here
5372 					 */
5373 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5374 					error = ECONNRESET;
5375 				}
5376 				so->so_state &= ~(SS_ISCONNECTING |
5377 				    SS_ISDISCONNECTING |
5378 				    SS_ISCONFIRMING |
5379 				    SS_ISCONNECTED);
5380 				if (error == 0) {
5381 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5382 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5383 						error = ENOTCONN;
5384 					}
5385 				}
5386 				goto out;
5387 			}
5388 		}
5389 		error = sbwait(&so->so_rcv);
5390 		if (error) {
5391 			goto out;
5392 		}
5393 		held_length = 0;
5394 		goto restart_nosblocks;
5395 	} else if (so->so_rcv.sb_cc == 0) {
5396 		if (so->so_error) {
5397 			error = so->so_error;
5398 			if ((in_flags & MSG_PEEK) == 0)
5399 				so->so_error = 0;
5400 		} else {
5401 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5402 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5403 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5404 					/*
5405 					 * For active open side clear flags
5406 					 * for re-use passive open is
5407 					 * blocked by connect.
5408 					 */
5409 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5410 						/*
5411 						 * You were aborted, passive
5412 						 * side always hits here
5413 						 */
5414 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5415 						error = ECONNRESET;
5416 					}
5417 					so->so_state &= ~(SS_ISCONNECTING |
5418 					    SS_ISDISCONNECTING |
5419 					    SS_ISCONFIRMING |
5420 					    SS_ISCONNECTED);
5421 					if (error == 0) {
5422 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5423 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5424 							error = ENOTCONN;
5425 						}
5426 					}
5427 					goto out;
5428 				}
5429 			}
5430 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5431 			error = EWOULDBLOCK;
5432 		}
5433 		goto out;
5434 	}
5435 	if (hold_sblock == 1) {
5436 		SOCKBUF_UNLOCK(&so->so_rcv);
5437 		hold_sblock = 0;
5438 	}
5439 	/* we possibly have data we can read */
5440 	/* sa_ignore FREED_MEMORY */
5441 	control = TAILQ_FIRST(&inp->read_queue);
5442 	if (control == NULL) {
5443 		/*
5444 		 * This could be happening since the appender did the
5445 		 * increment but as not yet did the tailq insert onto the
5446 		 * read_queue
5447 		 */
5448 		if (hold_rlock == 0) {
5449 			SCTP_INP_READ_LOCK(inp);
5450 		}
5451 		control = TAILQ_FIRST(&inp->read_queue);
5452 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5453 #ifdef INVARIANTS
5454 			panic("Huh, its non zero and nothing on control?");
5455 #endif
5456 			so->so_rcv.sb_cc = 0;
5457 		}
5458 		SCTP_INP_READ_UNLOCK(inp);
5459 		hold_rlock = 0;
5460 		goto restart;
5461 	}
5462 	if ((control->length == 0) &&
5463 	    (control->do_not_ref_stcb)) {
5464 		/*
5465 		 * Clean up code for freeing assoc that left behind a
5466 		 * pdapi.. maybe a peer in EEOR that just closed after
5467 		 * sending and never indicated a EOR.
5468 		 */
5469 		if (hold_rlock == 0) {
5470 			hold_rlock = 1;
5471 			SCTP_INP_READ_LOCK(inp);
5472 		}
5473 		control->held_length = 0;
5474 		if (control->data) {
5475 			/* Hmm there is data here .. fix */
5476 			struct mbuf *m_tmp;
5477 			int cnt = 0;
5478 
5479 			m_tmp = control->data;
5480 			while (m_tmp) {
5481 				cnt += SCTP_BUF_LEN(m_tmp);
5482 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5483 					control->tail_mbuf = m_tmp;
5484 					control->end_added = 1;
5485 				}
5486 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5487 			}
5488 			control->length = cnt;
5489 		} else {
5490 			/* remove it */
5491 			TAILQ_REMOVE(&inp->read_queue, control, next);
5492 			/* Add back any hiddend data */
5493 			sctp_free_remote_addr(control->whoFrom);
5494 			sctp_free_a_readq(stcb, control);
5495 		}
5496 		if (hold_rlock) {
5497 			hold_rlock = 0;
5498 			SCTP_INP_READ_UNLOCK(inp);
5499 		}
5500 		goto restart;
5501 	}
5502 	if ((control->length == 0) &&
5503 	    (control->end_added == 1)) {
5504 		/*
5505 		 * Do we also need to check for (control->pdapi_aborted ==
5506 		 * 1)?
5507 		 */
5508 		if (hold_rlock == 0) {
5509 			hold_rlock = 1;
5510 			SCTP_INP_READ_LOCK(inp);
5511 		}
5512 		TAILQ_REMOVE(&inp->read_queue, control, next);
5513 		if (control->data) {
5514 #ifdef INVARIANTS
5515 			panic("control->data not null but control->length == 0");
5516 #else
5517 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5518 			sctp_m_freem(control->data);
5519 			control->data = NULL;
5520 #endif
5521 		}
5522 		if (control->aux_data) {
5523 			sctp_m_free(control->aux_data);
5524 			control->aux_data = NULL;
5525 		}
5526 		sctp_free_remote_addr(control->whoFrom);
5527 		sctp_free_a_readq(stcb, control);
5528 		if (hold_rlock) {
5529 			hold_rlock = 0;
5530 			SCTP_INP_READ_UNLOCK(inp);
5531 		}
5532 		goto restart;
5533 	}
5534 	if (control->length == 0) {
5535 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5536 		    (filling_sinfo)) {
5537 			/* find a more suitable one then this */
5538 			ctl = TAILQ_NEXT(control, next);
5539 			while (ctl) {
5540 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5541 				    (ctl->some_taken ||
5542 				    (ctl->spec_flags & M_NOTIFICATION) ||
5543 				    ((ctl->do_not_ref_stcb == 0) &&
5544 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5545 				    ) {
5546 					/*-
5547 					 * If we have a different TCB next, and there is data
5548 					 * present. If we have already taken some (pdapi), OR we can
5549 					 * ref the tcb and no delivery as started on this stream, we
5550 					 * take it. Note we allow a notification on a different
5551 					 * assoc to be delivered..
5552 					 */
5553 					control = ctl;
5554 					goto found_one;
5555 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5556 					    (ctl->length) &&
5557 					    ((ctl->some_taken) ||
5558 					    ((ctl->do_not_ref_stcb == 0) &&
5559 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5560 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5561 					/*-
5562 					 * If we have the same tcb, and there is data present, and we
5563 					 * have the strm interleave feature present. Then if we have
5564 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5565 					 * not started a delivery for this stream, we can take it.
5566 					 * Note we do NOT allow a notificaiton on the same assoc to
5567 					 * be delivered.
5568 					 */
5569 					control = ctl;
5570 					goto found_one;
5571 				}
5572 				ctl = TAILQ_NEXT(ctl, next);
5573 			}
5574 		}
5575 		/*
5576 		 * if we reach here, not suitable replacement is available
5577 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5578 		 * into the our held count, and its time to sleep again.
5579 		 */
5580 		held_length = so->so_rcv.sb_cc;
5581 		control->held_length = so->so_rcv.sb_cc;
5582 		goto restart;
5583 	}
5584 	/* Clear the held length since there is something to read */
5585 	control->held_length = 0;
5586 	if (hold_rlock) {
5587 		SCTP_INP_READ_UNLOCK(inp);
5588 		hold_rlock = 0;
5589 	}
5590 found_one:
5591 	/*
5592 	 * If we reach here, control has a some data for us to read off.
5593 	 * Note that stcb COULD be NULL.
5594 	 */
5595 	control->some_taken++;
5596 	if (hold_sblock) {
5597 		SOCKBUF_UNLOCK(&so->so_rcv);
5598 		hold_sblock = 0;
5599 	}
5600 	stcb = control->stcb;
5601 	if (stcb) {
5602 		if ((control->do_not_ref_stcb == 0) &&
5603 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5604 			if (freecnt_applied == 0)
5605 				stcb = NULL;
5606 		} else if (control->do_not_ref_stcb == 0) {
5607 			/* you can't free it on me please */
5608 			/*
5609 			 * The lock on the socket buffer protects us so the
5610 			 * free code will stop. But since we used the
5611 			 * socketbuf lock and the sender uses the tcb_lock
5612 			 * to increment, we need to use the atomic add to
5613 			 * the refcnt
5614 			 */
5615 			if (freecnt_applied) {
5616 #ifdef INVARIANTS
5617 				panic("refcnt already incremented");
5618 #else
5619 				SCTP_PRINTF("refcnt already incremented?\n");
5620 #endif
5621 			} else {
5622 				atomic_add_int(&stcb->asoc.refcnt, 1);
5623 				freecnt_applied = 1;
5624 			}
5625 			/*
5626 			 * Setup to remember how much we have not yet told
5627 			 * the peer our rwnd has opened up. Note we grab the
5628 			 * value from the tcb from last time. Note too that
5629 			 * sack sending clears this when a sack is sent,
5630 			 * which is fine. Once we hit the rwnd_req, we then
5631 			 * will go to the sctp_user_rcvd() that will not
5632 			 * lock until it KNOWs it MUST send a WUP-SACK.
5633 			 */
5634 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5635 			stcb->freed_by_sorcv_sincelast = 0;
5636 		}
5637 	}
5638 	if (stcb &&
5639 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5640 	    control->do_not_ref_stcb == 0) {
5641 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5642 	}
5643 	/* First lets get off the sinfo and sockaddr info */
5644 	if ((sinfo) && filling_sinfo) {
5645 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5646 		nxt = TAILQ_NEXT(control, next);
5647 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5648 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5649 			struct sctp_extrcvinfo *s_extra;
5650 
5651 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5652 			if ((nxt) &&
5653 			    (nxt->length)) {
5654 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5655 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5656 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5657 				}
5658 				if (nxt->spec_flags & M_NOTIFICATION) {
5659 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5660 				}
5661 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5662 				s_extra->sreinfo_next_length = nxt->length;
5663 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5664 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5665 				if (nxt->tail_mbuf != NULL) {
5666 					if (nxt->end_added) {
5667 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5668 					}
5669 				}
5670 			} else {
5671 				/*
5672 				 * we explicitly 0 this, since the memcpy
5673 				 * got some other things beyond the older
5674 				 * sinfo_ that is on the control's structure
5675 				 * :-D
5676 				 */
5677 				nxt = NULL;
5678 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5679 				s_extra->sreinfo_next_aid = 0;
5680 				s_extra->sreinfo_next_length = 0;
5681 				s_extra->sreinfo_next_ppid = 0;
5682 				s_extra->sreinfo_next_stream = 0;
5683 			}
5684 		}
5685 		/*
5686 		 * update off the real current cum-ack, if we have an stcb.
5687 		 */
5688 		if ((control->do_not_ref_stcb == 0) && stcb)
5689 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5690 		/*
5691 		 * mask off the high bits, we keep the actual chunk bits in
5692 		 * there.
5693 		 */
5694 		sinfo->sinfo_flags &= 0x00ff;
5695 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5696 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5697 		}
5698 	}
5699 #ifdef SCTP_ASOCLOG_OF_TSNS
5700 	{
5701 		int index, newindex;
5702 		struct sctp_pcbtsn_rlog *entry;
5703 
5704 		do {
5705 			index = inp->readlog_index;
5706 			newindex = index + 1;
5707 			if (newindex >= SCTP_READ_LOG_SIZE) {
5708 				newindex = 0;
5709 			}
5710 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5711 		entry = &inp->readlog[index];
5712 		entry->vtag = control->sinfo_assoc_id;
5713 		entry->strm = control->sinfo_stream;
5714 		entry->seq = control->sinfo_ssn;
5715 		entry->sz = control->length;
5716 		entry->flgs = control->sinfo_flags;
5717 	}
5718 #endif
5719 	if ((fromlen > 0) && (from != NULL)) {
5720 		union sctp_sockstore store;
5721 		size_t len;
5722 
5723 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5724 #ifdef INET6
5725 		case AF_INET6:
5726 			len = sizeof(struct sockaddr_in6);
5727 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5728 			store.sin6.sin6_port = control->port_from;
5729 			break;
5730 #endif
5731 #ifdef INET
5732 		case AF_INET:
5733 #ifdef INET6
5734 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5735 				len = sizeof(struct sockaddr_in6);
5736 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5737 				    &store.sin6);
5738 				store.sin6.sin6_port = control->port_from;
5739 			} else {
5740 				len = sizeof(struct sockaddr_in);
5741 				store.sin = control->whoFrom->ro._l_addr.sin;
5742 				store.sin.sin_port = control->port_from;
5743 			}
5744 #else
5745 			len = sizeof(struct sockaddr_in);
5746 			store.sin = control->whoFrom->ro._l_addr.sin;
5747 			store.sin.sin_port = control->port_from;
5748 #endif
5749 			break;
5750 #endif
5751 		default:
5752 			len = 0;
5753 			break;
5754 		}
5755 		memcpy(from, &store, min((size_t)fromlen, len));
5756 #ifdef INET6
5757 		{
5758 			struct sockaddr_in6 lsa6, *from6;
5759 
5760 			from6 = (struct sockaddr_in6 *)from;
5761 			sctp_recover_scope_mac(from6, (&lsa6));
5762 		}
5763 #endif
5764 	}
5765 	/* now copy out what data we can */
5766 	if (mp == NULL) {
5767 		/* copy out each mbuf in the chain up to length */
5768 get_more_data:
5769 		m = control->data;
5770 		while (m) {
5771 			/* Move out all we can */
5772 			cp_len = (int)uio->uio_resid;
5773 			my_len = (int)SCTP_BUF_LEN(m);
5774 			if (cp_len > my_len) {
5775 				/* not enough in this buf */
5776 				cp_len = my_len;
5777 			}
5778 			if (hold_rlock) {
5779 				SCTP_INP_READ_UNLOCK(inp);
5780 				hold_rlock = 0;
5781 			}
5782 			if (cp_len > 0)
5783 				error = uiomove(mtod(m, char *), cp_len, uio);
5784 			/* re-read */
5785 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5786 				goto release;
5787 			}
5788 			if ((control->do_not_ref_stcb == 0) && stcb &&
5789 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5790 				no_rcv_needed = 1;
5791 			}
5792 			if (error) {
5793 				/* error we are out of here */
5794 				goto release;
5795 			}
5796 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5797 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5798 			    ((control->end_added == 0) ||
5799 			    (control->end_added &&
5800 			    (TAILQ_NEXT(control, next) == NULL)))
5801 			    ) {
5802 				SCTP_INP_READ_LOCK(inp);
5803 				hold_rlock = 1;
5804 			}
5805 			if (cp_len == SCTP_BUF_LEN(m)) {
5806 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5807 				    (control->end_added)) {
5808 					out_flags |= MSG_EOR;
5809 					if ((control->do_not_ref_stcb == 0) &&
5810 					    (control->stcb != NULL) &&
5811 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5812 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5813 				}
5814 				if (control->spec_flags & M_NOTIFICATION) {
5815 					out_flags |= MSG_NOTIFICATION;
5816 				}
5817 				/* we ate up the mbuf */
5818 				if (in_flags & MSG_PEEK) {
5819 					/* just looking */
5820 					m = SCTP_BUF_NEXT(m);
5821 					copied_so_far += cp_len;
5822 				} else {
5823 					/* dispose of the mbuf */
5824 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5825 						sctp_sblog(&so->so_rcv,
5826 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5827 					}
5828 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5829 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5830 						sctp_sblog(&so->so_rcv,
5831 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5832 					}
5833 					copied_so_far += cp_len;
5834 					freed_so_far += cp_len;
5835 					freed_so_far += MSIZE;
5836 					atomic_subtract_int(&control->length, cp_len);
5837 					control->data = sctp_m_free(m);
5838 					m = control->data;
5839 					/*
5840 					 * been through it all, must hold sb
5841 					 * lock ok to null tail
5842 					 */
5843 					if (control->data == NULL) {
5844 #ifdef INVARIANTS
5845 						if ((control->end_added == 0) ||
5846 						    (TAILQ_NEXT(control, next) == NULL)) {
5847 							/*
5848 							 * If the end is not
5849 							 * added, OR the
5850 							 * next is NOT null
5851 							 * we MUST have the
5852 							 * lock.
5853 							 */
5854 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5855 								panic("Hmm we don't own the lock?");
5856 							}
5857 						}
5858 #endif
5859 						control->tail_mbuf = NULL;
5860 #ifdef INVARIANTS
5861 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5862 							panic("end_added, nothing left and no MSG_EOR");
5863 						}
5864 #endif
5865 					}
5866 				}
5867 			} else {
5868 				/* Do we need to trim the mbuf? */
5869 				if (control->spec_flags & M_NOTIFICATION) {
5870 					out_flags |= MSG_NOTIFICATION;
5871 				}
5872 				if ((in_flags & MSG_PEEK) == 0) {
5873 					SCTP_BUF_RESV_UF(m, cp_len);
5874 					SCTP_BUF_LEN(m) -= cp_len;
5875 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5876 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5877 					}
5878 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5879 					if ((control->do_not_ref_stcb == 0) &&
5880 					    stcb) {
5881 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5882 					}
5883 					copied_so_far += cp_len;
5884 					freed_so_far += cp_len;
5885 					freed_so_far += MSIZE;
5886 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5887 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5888 						    SCTP_LOG_SBRESULT, 0);
5889 					}
5890 					atomic_subtract_int(&control->length, cp_len);
5891 				} else {
5892 					copied_so_far += cp_len;
5893 				}
5894 			}
5895 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5896 				break;
5897 			}
5898 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5899 			    (control->do_not_ref_stcb == 0) &&
5900 			    (freed_so_far >= rwnd_req)) {
5901 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5902 			}
5903 		}		/* end while(m) */
5904 		/*
5905 		 * At this point we have looked at it all and we either have
5906 		 * a MSG_EOR/or read all the user wants... <OR>
5907 		 * control->length == 0.
5908 		 */
5909 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5910 			/* we are done with this control */
5911 			if (control->length == 0) {
5912 				if (control->data) {
5913 #ifdef INVARIANTS
5914 					panic("control->data not null at read eor?");
5915 #else
5916 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5917 					sctp_m_freem(control->data);
5918 					control->data = NULL;
5919 #endif
5920 				}
5921 		done_with_control:
5922 				if (TAILQ_NEXT(control, next) == NULL) {
5923 					/*
5924 					 * If we don't have a next we need a
5925 					 * lock, if there is a next
5926 					 * interrupt is filling ahead of us
5927 					 * and we don't need a lock to
5928 					 * remove this guy (which is the
5929 					 * head of the queue).
5930 					 */
5931 					if (hold_rlock == 0) {
5932 						SCTP_INP_READ_LOCK(inp);
5933 						hold_rlock = 1;
5934 					}
5935 				}
5936 				TAILQ_REMOVE(&inp->read_queue, control, next);
5937 				/* Add back any hiddend data */
5938 				if (control->held_length) {
5939 					held_length = 0;
5940 					control->held_length = 0;
5941 					wakeup_read_socket = 1;
5942 				}
5943 				if (control->aux_data) {
5944 					sctp_m_free(control->aux_data);
5945 					control->aux_data = NULL;
5946 				}
5947 				no_rcv_needed = control->do_not_ref_stcb;
5948 				sctp_free_remote_addr(control->whoFrom);
5949 				control->data = NULL;
5950 				sctp_free_a_readq(stcb, control);
5951 				control = NULL;
5952 				if ((freed_so_far >= rwnd_req) &&
5953 				    (no_rcv_needed == 0))
5954 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5955 
5956 			} else {
5957 				/*
5958 				 * The user did not read all of this
5959 				 * message, turn off the returned MSG_EOR
5960 				 * since we are leaving more behind on the
5961 				 * control to read.
5962 				 */
5963 #ifdef INVARIANTS
5964 				if (control->end_added &&
5965 				    (control->data == NULL) &&
5966 				    (control->tail_mbuf == NULL)) {
5967 					panic("Gak, control->length is corrupt?");
5968 				}
5969 #endif
5970 				no_rcv_needed = control->do_not_ref_stcb;
5971 				out_flags &= ~MSG_EOR;
5972 			}
5973 		}
5974 		if (out_flags & MSG_EOR) {
5975 			goto release;
5976 		}
5977 		if ((uio->uio_resid == 0) ||
5978 		    ((in_eeor_mode) &&
5979 		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5980 			goto release;
5981 		}
5982 		/*
5983 		 * If I hit here the receiver wants more and this message is
5984 		 * NOT done (pd-api). So two questions. Can we block? if not
5985 		 * we are done. Did the user NOT set MSG_WAITALL?
5986 		 */
5987 		if (block_allowed == 0) {
5988 			goto release;
5989 		}
5990 		/*
5991 		 * We need to wait for more data a few things: - We don't
5992 		 * sbunlock() so we don't get someone else reading. - We
5993 		 * must be sure to account for the case where what is added
5994 		 * is NOT to our control when we wakeup.
5995 		 */
5996 
5997 		/*
5998 		 * Do we need to tell the transport a rwnd update might be
5999 		 * needed before we go to sleep?
6000 		 */
6001 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6002 		    ((freed_so_far >= rwnd_req) &&
6003 		    (control->do_not_ref_stcb == 0) &&
6004 		    (no_rcv_needed == 0))) {
6005 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6006 		}
6007 wait_some_more:
6008 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6009 			goto release;
6010 		}
6011 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6012 			goto release;
6013 
6014 		if (hold_rlock == 1) {
6015 			SCTP_INP_READ_UNLOCK(inp);
6016 			hold_rlock = 0;
6017 		}
6018 		if (hold_sblock == 0) {
6019 			SOCKBUF_LOCK(&so->so_rcv);
6020 			hold_sblock = 1;
6021 		}
6022 		if ((copied_so_far) && (control->length == 0) &&
6023 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6024 			goto release;
6025 		}
6026 		if (so->so_rcv.sb_cc <= control->held_length) {
6027 			error = sbwait(&so->so_rcv);
6028 			if (error) {
6029 				goto release;
6030 			}
6031 			control->held_length = 0;
6032 		}
6033 		if (hold_sblock) {
6034 			SOCKBUF_UNLOCK(&so->so_rcv);
6035 			hold_sblock = 0;
6036 		}
6037 		if (control->length == 0) {
6038 			/* still nothing here */
6039 			if (control->end_added == 1) {
6040 				/* he aborted, or is done i.e.did a shutdown */
6041 				out_flags |= MSG_EOR;
6042 				if (control->pdapi_aborted) {
6043 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6044 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6045 
6046 					out_flags |= MSG_TRUNC;
6047 				} else {
6048 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6049 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6050 				}
6051 				goto done_with_control;
6052 			}
6053 			if (so->so_rcv.sb_cc > held_length) {
6054 				control->held_length = so->so_rcv.sb_cc;
6055 				held_length = 0;
6056 			}
6057 			goto wait_some_more;
6058 		} else if (control->data == NULL) {
6059 			/*
6060 			 * we must re-sync since data is probably being
6061 			 * added
6062 			 */
6063 			SCTP_INP_READ_LOCK(inp);
6064 			if ((control->length > 0) && (control->data == NULL)) {
6065 				/*
6066 				 * big trouble.. we have the lock and its
6067 				 * corrupt?
6068 				 */
6069 #ifdef INVARIANTS
6070 				panic("Impossible data==NULL length !=0");
6071 #endif
6072 				out_flags |= MSG_EOR;
6073 				out_flags |= MSG_TRUNC;
6074 				control->length = 0;
6075 				SCTP_INP_READ_UNLOCK(inp);
6076 				goto done_with_control;
6077 			}
6078 			SCTP_INP_READ_UNLOCK(inp);
6079 			/* We will fall around to get more data */
6080 		}
6081 		goto get_more_data;
6082 	} else {
6083 		/*-
6084 		 * Give caller back the mbuf chain,
6085 		 * store in uio_resid the length
6086 		 */
6087 		wakeup_read_socket = 0;
6088 		if ((control->end_added == 0) ||
6089 		    (TAILQ_NEXT(control, next) == NULL)) {
6090 			/* Need to get rlock */
6091 			if (hold_rlock == 0) {
6092 				SCTP_INP_READ_LOCK(inp);
6093 				hold_rlock = 1;
6094 			}
6095 		}
6096 		if (control->end_added) {
6097 			out_flags |= MSG_EOR;
6098 			if ((control->do_not_ref_stcb == 0) &&
6099 			    (control->stcb != NULL) &&
6100 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6101 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6102 		}
6103 		if (control->spec_flags & M_NOTIFICATION) {
6104 			out_flags |= MSG_NOTIFICATION;
6105 		}
6106 		uio->uio_resid = control->length;
6107 		*mp = control->data;
6108 		m = control->data;
6109 		while (m) {
6110 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6111 				sctp_sblog(&so->so_rcv,
6112 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6113 			}
6114 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6115 			freed_so_far += SCTP_BUF_LEN(m);
6116 			freed_so_far += MSIZE;
6117 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6118 				sctp_sblog(&so->so_rcv,
6119 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6120 			}
6121 			m = SCTP_BUF_NEXT(m);
6122 		}
6123 		control->data = control->tail_mbuf = NULL;
6124 		control->length = 0;
6125 		if (out_flags & MSG_EOR) {
6126 			/* Done with this control */
6127 			goto done_with_control;
6128 		}
6129 	}
6130 release:
6131 	if (hold_rlock == 1) {
6132 		SCTP_INP_READ_UNLOCK(inp);
6133 		hold_rlock = 0;
6134 	}
6135 	if (hold_sblock == 1) {
6136 		SOCKBUF_UNLOCK(&so->so_rcv);
6137 		hold_sblock = 0;
6138 	}
6139 	sbunlock(&so->so_rcv);
6140 	sockbuf_lock = 0;
6141 
6142 release_unlocked:
6143 	if (hold_sblock) {
6144 		SOCKBUF_UNLOCK(&so->so_rcv);
6145 		hold_sblock = 0;
6146 	}
6147 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6148 		if ((freed_so_far >= rwnd_req) &&
6149 		    (control && (control->do_not_ref_stcb == 0)) &&
6150 		    (no_rcv_needed == 0))
6151 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6152 	}
6153 out:
6154 	if (msg_flags) {
6155 		*msg_flags = out_flags;
6156 	}
6157 	if (((out_flags & MSG_EOR) == 0) &&
6158 	    ((in_flags & MSG_PEEK) == 0) &&
6159 	    (sinfo) &&
6160 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6161 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6162 		struct sctp_extrcvinfo *s_extra;
6163 
6164 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6165 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6166 	}
6167 	if (hold_rlock == 1) {
6168 		SCTP_INP_READ_UNLOCK(inp);
6169 	}
6170 	if (hold_sblock) {
6171 		SOCKBUF_UNLOCK(&so->so_rcv);
6172 	}
6173 	if (sockbuf_lock) {
6174 		sbunlock(&so->so_rcv);
6175 	}
6176 	if (freecnt_applied) {
6177 		/*
6178 		 * The lock on the socket buffer protects us so the free
6179 		 * code will stop. But since we used the socketbuf lock and
6180 		 * the sender uses the tcb_lock to increment, we need to use
6181 		 * the atomic add to the refcnt.
6182 		 */
6183 		if (stcb == NULL) {
6184 #ifdef INVARIANTS
6185 			panic("stcb for refcnt has gone NULL?");
6186 			goto stage_left;
6187 #else
6188 			goto stage_left;
6189 #endif
6190 		}
6191 		atomic_add_int(&stcb->asoc.refcnt, -1);
6192 		/* Save the value back for next time */
6193 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6194 	}
6195 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6196 		if (stcb) {
6197 			sctp_misc_ints(SCTP_SORECV_DONE,
6198 			    freed_so_far,
6199 			    ((uio) ? (slen - uio->uio_resid) : slen),
6200 			    stcb->asoc.my_rwnd,
6201 			    so->so_rcv.sb_cc);
6202 		} else {
6203 			sctp_misc_ints(SCTP_SORECV_DONE,
6204 			    freed_so_far,
6205 			    ((uio) ? (slen - uio->uio_resid) : slen),
6206 			    0,
6207 			    so->so_rcv.sb_cc);
6208 		}
6209 	}
6210 stage_left:
6211 	if (wakeup_read_socket) {
6212 		sctp_sorwakeup(inp, so);
6213 	}
6214 	return (error);
6215 }
6216 
6217 
#ifdef SCTP_MBUF_LOGGING
/*
 * Free a single mbuf, recording it in the SCTP mbuf trace log first
 * when mbuf logging is enabled.  Returns the next mbuf in the chain
 * (the result of m_free()).
 */
struct mbuf *
sctp_m_free(struct mbuf *m)
{
	uint32_t log_level;

	log_level = SCTP_BASE_SYSCTL(sctp_logging_level);
	if (log_level & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mb(m, SCTP_MBUF_IFREE);
	}
	return (m_free(m));
}

/*
 * Free an entire mbuf chain one mbuf at a time, so that every member
 * passes through the logging hook in sctp_m_free().
 */
void
sctp_m_freem(struct mbuf *mb)
{
	struct mbuf *next;

	for (next = mb; next != NULL; next = sctp_m_free(next))
		continue;
}

#endif
6236 
6237 int
6238 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6239 {
6240 	/*
6241 	 * Given a local address. For all associations that holds the
6242 	 * address, request a peer-set-primary.
6243 	 */
6244 	struct sctp_ifa *ifa;
6245 	struct sctp_laddr *wi;
6246 
6247 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6248 	if (ifa == NULL) {
6249 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6250 		return (EADDRNOTAVAIL);
6251 	}
6252 	/*
6253 	 * Now that we have the ifa we must awaken the iterator with this
6254 	 * message.
6255 	 */
6256 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6257 	if (wi == NULL) {
6258 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6259 		return (ENOMEM);
6260 	}
6261 	/* Now incr the count and int wi structure */
6262 	SCTP_INCR_LADDR_COUNT();
6263 	bzero(wi, sizeof(*wi));
6264 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6265 	wi->ifa = ifa;
6266 	wi->action = SCTP_SET_PRIM_ADDR;
6267 	atomic_add_int(&ifa->refcount, 1);
6268 
6269 	/* Now add it to the work queue */
6270 	SCTP_WQ_ADDR_LOCK();
6271 	/*
6272 	 * Should this really be a tailq? As it is we will process the
6273 	 * newest first :-0
6274 	 */
6275 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6276 	SCTP_WQ_ADDR_UNLOCK();
6277 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6278 	    (struct sctp_inpcb *)NULL,
6279 	    (struct sctp_tcb *)NULL,
6280 	    (struct sctp_nets *)NULL);
6281 	return (0);
6282 }
6283 
6284 
6285 int
6286 sctp_soreceive(struct socket *so,
6287     struct sockaddr **psa,
6288     struct uio *uio,
6289     struct mbuf **mp0,
6290     struct mbuf **controlp,
6291     int *flagsp)
6292 {
6293 	int error, fromlen;
6294 	uint8_t sockbuf[256];
6295 	struct sockaddr *from;
6296 	struct sctp_extrcvinfo sinfo;
6297 	int filling_sinfo = 1;
6298 	struct sctp_inpcb *inp;
6299 
6300 	inp = (struct sctp_inpcb *)so->so_pcb;
6301 	/* pickup the assoc we are reading from */
6302 	if (inp == NULL) {
6303 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6304 		return (EINVAL);
6305 	}
6306 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6307 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6308 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6309 	    (controlp == NULL)) {
6310 		/* user does not want the sndrcv ctl */
6311 		filling_sinfo = 0;
6312 	}
6313 	if (psa) {
6314 		from = (struct sockaddr *)sockbuf;
6315 		fromlen = sizeof(sockbuf);
6316 		from->sa_len = 0;
6317 	} else {
6318 		from = NULL;
6319 		fromlen = 0;
6320 	}
6321 
6322 	if (filling_sinfo) {
6323 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6324 	}
6325 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6326 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6327 	if (controlp != NULL) {
6328 		/* copy back the sinfo in a CMSG format */
6329 		if (filling_sinfo)
6330 			*controlp = sctp_build_ctl_nchunk(inp,
6331 			    (struct sctp_sndrcvinfo *)&sinfo);
6332 		else
6333 			*controlp = NULL;
6334 	}
6335 	if (psa) {
6336 		/* copy back the address info */
6337 		if (from && from->sa_len) {
6338 			*psa = sodupsockaddr(from, M_NOWAIT);
6339 		} else {
6340 			*psa = NULL;
6341 		}
6342 	}
6343 	return (error);
6344 }
6345 
6346 
6347 
6348 
6349 
/*
 * Add "totaddr" packed sockaddrs starting at "addr" as remote addresses
 * of the association "stcb".  Returns the number of addresses added.
 * On failure *error is set AND the association is freed via
 * sctp_free_assoc() — the caller must not touch stcb afterwards.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;	/* byte size of the current sockaddr */

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject wildcard, broadcast and multicast peers. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast peers. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * Unknown families are silently skipped.
			 * NOTE(review): "incr" keeps its previous value
			 * here (initially 0), so "sa" may not advance for
			 * such an entry — callers are presumably expected
			 * to have validated the list first with
			 * sctp_connectx_helper_find(); confirm.
			 */
			break;
		}
		/* Step to the next packed sockaddr in the list. */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6429 
/*
 * Scan the packed address list "addr" (bounded by "limit" bytes),
 * counting IPv4/IPv6 entries into *num_v4/*num_v6 and validating each
 * sa_len.  If any address already maps to an association on "inp",
 * return that association (with the endpoint reference still held);
 * otherwise return NULL.  *totaddr is trimmed when an unknown family
 * or the byte limit ends the scan.  On a malformed address, *error and
 * *bad_addr are set and NULL is returned.
 */
struct sctp_tcb *
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    int *totaddr, int *num_v4, int *num_v6, int *error,
    int limit, int *bad_addr)
{
	struct sockaddr *sa;
	struct sctp_tcb *stcb = NULL;
	size_t incr, at, i;

	at = incr = 0;
	sa = addr;

	*error = *num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	for (i = 0; i < (size_t)*totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			(*num_v4) += 1;
			incr = sizeof(struct sockaddr_in);
			if (sa->sa_len != incr) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
			break;
#endif
#ifdef INET6
		case AF_INET6:
			{
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)sa;
				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
					/* Must be non-mapped for connectx */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				(*num_v6) += 1;
				incr = sizeof(struct sockaddr_in6);
				if (sa->sa_len != incr) {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				break;
			}
#endif
		default:
			/* Unknown family: stop the scan at this entry. */
			*totaddr = i;
			/* we are done */
			break;
		}
		if (i == (size_t)*totaddr) {
			break;
		}
		/* Hold a ref on the endpoint across the lookup. */
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/* Already have or am bring up an association */
			/* NOTE(review): the endpoint ref taken above is
			 * intentionally kept on this return path. */
			return (stcb);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		/*
		 * NOTE(review): "at" is never advanced in this loop, so
		 * this limit check compares a single sockaddr size
		 * against the whole limit; it looks like it should be
		 * "at += incr" somewhere — confirm against callers.
		 */
		if ((at + incr) > (size_t)limit) {
			*totaddr = i;
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return ((struct sctp_tcb *)NULL);
}
6506 
6507 /*
6508  * sctp_bindx(ADD) for one address.
6509  * assumes all arguments are valid/checked by caller.
6510  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/*
	 * Endpoint-level bindx(ADD): validate the address family and
	 * length against the socket's binding mode, then either perform
	 * the initial bind (if still unbound) or add the address to the
	 * endpoint's list.  *error receives 0 or an errno value.
	 */
	struct sockaddr *addr_touse;

#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Treat a v4-mapped address as plain IPv4. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* Not yet bound at all: this becomes the primary bind. */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* Is some other endpoint already bound to this address? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Address is free: clear the port and add it. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6638 
6639 /*
6640  * sctp_bindx(DELETE) for one address.
6641  * assumes all arguments are valid/checked by caller.
6642  */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/*
	 * Endpoint-level bindx(DELETE): validate the address family and
	 * length against the socket's binding mode, then remove the
	 * address from the endpoint's list.  *error receives 0 or an
	 * errno value.
	 */
	struct sockaddr *addr_touse;

#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Treat a v4-mapped address as plain IPv4. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6728 
6729 /*
6730  * returns the valid local address count for an assoc, taking into account
6731  * all scoping rules
6732  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Count the local addresses usable by this association, applying
	 * the association's scoping rules (loopback, private v4, link- and
	 * site-local v6) and jail restrictions.  Returns the count.
	 */
	int loopback_scope;

#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;

#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;

#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* Skip addresses not visible
						 * inside our jail. */
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						/* Skip addresses not visible
						 * inside our jail. */
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6878 
#if defined(SCTP_LOCAL_TRACE_BUF)

/*
 * Record one entry in the circular SCTP trace buffer: a subsystem tag,
 * a cycle-count timestamp and six caller-supplied parameters.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t idx, nxt;

	/*
	 * Claim the next slot with a CAS loop so concurrent callers each
	 * get a distinct entry without taking a lock.
	 */
	do {
		idx = SCTP_BASE_SYSCTL(sctp_log).index;
		nxt = (idx >= SCTP_MAX_LOGGING_SIZE) ? 1 : idx + 1;
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, idx, nxt) == 0);
	/* A claimed index past the end wraps back to slot 0. */
	if (idx >= SCTP_MAX_LOGGING_SIZE) {
		idx = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[idx].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[idx].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[idx].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[idx].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[idx].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[idx].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[idx].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[idx].params[5] = f;
}

#endif
6908 static void
6909 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6910     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6911 {
6912 	struct ip *iph;
6913 
6914 #ifdef INET6
6915 	struct ip6_hdr *ip6;
6916 
6917 #endif
6918 	struct mbuf *sp, *last;
6919 	struct udphdr *uhdr;
6920 	uint16_t port;
6921 
6922 	if ((m->m_flags & M_PKTHDR) == 0) {
6923 		/* Can't handle one that is not a pkt hdr */
6924 		goto out;
6925 	}
6926 	/* Pull the src port */
6927 	iph = mtod(m, struct ip *);
6928 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6929 	port = uhdr->uh_sport;
6930 	/*
6931 	 * Split out the mbuf chain. Leave the IP header in m, place the
6932 	 * rest in the sp.
6933 	 */
6934 	sp = m_split(m, off, M_NOWAIT);
6935 	if (sp == NULL) {
6936 		/* Gak, drop packet, we can't do a split */
6937 		goto out;
6938 	}
6939 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6940 		/* Gak, packet can't have an SCTP header in it - too small */
6941 		m_freem(sp);
6942 		goto out;
6943 	}
6944 	/* Now pull up the UDP header and SCTP header together */
6945 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6946 	if (sp == NULL) {
6947 		/* Gak pullup failed */
6948 		goto out;
6949 	}
6950 	/* Trim out the UDP header */
6951 	m_adj(sp, sizeof(struct udphdr));
6952 
6953 	/* Now reconstruct the mbuf chain */
6954 	for (last = m; last->m_next; last = last->m_next);
6955 	last->m_next = sp;
6956 	m->m_pkthdr.len += sp->m_pkthdr.len;
6957 	iph = mtod(m, struct ip *);
6958 	switch (iph->ip_v) {
6959 #ifdef INET
6960 	case IPVERSION:
6961 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6962 		sctp_input_with_port(m, off, port);
6963 		break;
6964 #endif
6965 #ifdef INET6
6966 	case IPV6_VERSION >> 4:
6967 		ip6 = mtod(m, struct ip6_hdr *);
6968 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6969 		sctp6_input_with_port(&m, &off, port);
6970 		break;
6971 #endif
6972 	default:
6973 		goto out;
6974 		break;
6975 	}
6976 	return;
6977 out:
6978 	m_freem(m);
6979 }
6980 
/*
 * Tear down the kernel UDP tunneling sockets, one per enabled address
 * family.  Safe to call when tunneling was never started.
 *
 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
 * for writing!
 */
void
sctp_over_udp_stop(void)
{
#if defined(INET) || defined(INET6)
	struct socket *so;

#endif
#ifdef INET
	so = SCTP_BASE_INFO(udp4_tun_socket);
	if (so != NULL) {
		soclose(so);
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	so = SCTP_BASE_INFO(udp6_tun_socket);
	if (so != NULL) {
		soclose(so);
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
7001 
7002 int
7003 sctp_over_udp_start(void)
7004 {
7005 	uint16_t port;
7006 	int ret;
7007 
7008 #ifdef INET
7009 	struct sockaddr_in sin;
7010 
7011 #endif
7012 #ifdef INET6
7013 	struct sockaddr_in6 sin6;
7014 
7015 #endif
7016 	/*
7017 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7018 	 * for writting!
7019 	 */
7020 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7021 	if (ntohs(port) == 0) {
7022 		/* Must have a port set */
7023 		return (EINVAL);
7024 	}
7025 #ifdef INET
7026 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7027 		/* Already running -- must stop first */
7028 		return (EALREADY);
7029 	}
7030 #endif
7031 #ifdef INET6
7032 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7033 		/* Already running -- must stop first */
7034 		return (EALREADY);
7035 	}
7036 #endif
7037 #ifdef INET
7038 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7039 	    SOCK_DGRAM, IPPROTO_UDP,
7040 	    curthread->td_ucred, curthread))) {
7041 		sctp_over_udp_stop();
7042 		return (ret);
7043 	}
7044 	/* Call the special UDP hook. */
7045 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7046 	    sctp_recv_udp_tunneled_packet, NULL))) {
7047 		sctp_over_udp_stop();
7048 		return (ret);
7049 	}
7050 	/* Ok, we have a socket, bind it to the port. */
7051 	memset(&sin, 0, sizeof(struct sockaddr_in));
7052 	sin.sin_len = sizeof(struct sockaddr_in);
7053 	sin.sin_family = AF_INET;
7054 	sin.sin_port = htons(port);
7055 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7056 	    (struct sockaddr *)&sin, curthread))) {
7057 		sctp_over_udp_stop();
7058 		return (ret);
7059 	}
7060 #endif
7061 #ifdef INET6
7062 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7063 	    SOCK_DGRAM, IPPROTO_UDP,
7064 	    curthread->td_ucred, curthread))) {
7065 		sctp_over_udp_stop();
7066 		return (ret);
7067 	}
7068 	/* Call the special UDP hook. */
7069 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7070 	    sctp_recv_udp_tunneled_packet, NULL))) {
7071 		sctp_over_udp_stop();
7072 		return (ret);
7073 	}
7074 	/* Ok, we have a socket, bind it to the port. */
7075 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7076 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7077 	sin6.sin6_family = AF_INET6;
7078 	sin6.sin6_port = htons(port);
7079 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7080 	    (struct sockaddr *)&sin6, curthread))) {
7081 		sctp_over_udp_stop();
7082 		return (ret);
7083 	}
7084 #endif
7085 	return (0);
7086 }
7087