xref: /freebsd/sys/netinet/sctputil.c (revision f4b37ed0f8b307b1f3f0f630ca725d68f1dff30d)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55 
56 
57 #ifndef KTR_SCTP
58 #define KTR_SCTP KTR_SUBSYS
59 #endif
60 
61 extern struct sctp_cc_functions sctp_cc_functions[];
62 extern struct sctp_ss_functions sctp_ss_functions[];
63 
64 void
65 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
66 {
67 	struct sctp_cwnd_log sctp_clog;
68 
69 	sctp_clog.x.sb.stcb = stcb;
70 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
71 	if (stcb)
72 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
73 	else
74 		sctp_clog.x.sb.stcb_sbcc = 0;
75 	sctp_clog.x.sb.incr = incr;
76 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
77 	    SCTP_LOG_EVENT_SB,
78 	    from,
79 	    sctp_clog.x.misc.log1,
80 	    sctp_clog.x.misc.log2,
81 	    sctp_clog.x.misc.log3,
82 	    sctp_clog.x.misc.log4);
83 }
84 
85 void
86 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
87 {
88 	struct sctp_cwnd_log sctp_clog;
89 
90 	sctp_clog.x.close.inp = (void *)inp;
91 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
92 	if (stcb) {
93 		sctp_clog.x.close.stcb = (void *)stcb;
94 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
95 	} else {
96 		sctp_clog.x.close.stcb = 0;
97 		sctp_clog.x.close.state = 0;
98 	}
99 	sctp_clog.x.close.loc = loc;
100 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
101 	    SCTP_LOG_EVENT_CLOSE,
102 	    0,
103 	    sctp_clog.x.misc.log1,
104 	    sctp_clog.x.misc.log2,
105 	    sctp_clog.x.misc.log3,
106 	    sctp_clog.x.misc.log4);
107 }
108 
/*
 * Log an RTT sample for the given destination address into the kernel
 * trace stream; "from" tags the call site.
 */
void
rto_logging(struct sctp_nets *net, int from)
{
	struct sctp_cwnd_log sctp_clog;

	/* Clear the record so unused log words are deterministically zero. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	/* net->rtt is scaled down by 1000 for the log record (presumably us -> ms — confirm). */
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
125 
126 void
127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128 {
129 	struct sctp_cwnd_log sctp_clog;
130 
131 	sctp_clog.x.strlog.stcb = stcb;
132 	sctp_clog.x.strlog.n_tsn = tsn;
133 	sctp_clog.x.strlog.n_sseq = sseq;
134 	sctp_clog.x.strlog.e_tsn = 0;
135 	sctp_clog.x.strlog.e_sseq = 0;
136 	sctp_clog.x.strlog.strm = stream;
137 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138 	    SCTP_LOG_EVENT_STRM,
139 	    from,
140 	    sctp_clog.x.misc.log1,
141 	    sctp_clog.x.misc.log2,
142 	    sctp_clog.x.misc.log3,
143 	    sctp_clog.x.misc.log4);
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 void
166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167 {
168 	struct sctp_cwnd_log sctp_clog;
169 
170 	sctp_clog.x.sack.cumack = cumack;
171 	sctp_clog.x.sack.oldcumack = old_cumack;
172 	sctp_clog.x.sack.tsn = tsn;
173 	sctp_clog.x.sack.numGaps = gaps;
174 	sctp_clog.x.sack.numDups = dups;
175 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176 	    SCTP_LOG_EVENT_SACK,
177 	    from,
178 	    sctp_clog.x.misc.log1,
179 	    sctp_clog.x.misc.log2,
180 	    sctp_clog.x.misc.log3,
181 	    sctp_clog.x.misc.log4);
182 }
183 
/*
 * Log the mapping-array state (base, cumulative ack point, and highest
 * TSN seen); "from" tags the call site.
 */
void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
	struct sctp_cwnd_log sctp_clog;

	/* Clear the record so unused log words are deterministically zero. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
201 
/*
 * Log a fast-retransmit event: the largest TSN, the largest newly acked
 * TSN, and the TSN being considered; "from" tags the call site.
 */
void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
	struct sctp_cwnd_log sctp_clog;

	/* Clear the record so unused log words are deterministically zero. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
219 
220 #ifdef SCTP_MBUF_LOGGING
221 void
222 sctp_log_mb(struct mbuf *m, int from)
223 {
224 	struct sctp_cwnd_log sctp_clog;
225 
226 	sctp_clog.x.mb.mp = m;
227 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
228 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
229 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
230 	if (SCTP_BUF_IS_EXTENDED(m)) {
231 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
232 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
233 	} else {
234 		sctp_clog.x.mb.ext = 0;
235 		sctp_clog.x.mb.refcnt = 0;
236 	}
237 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
238 	    SCTP_LOG_EVENT_MBUF,
239 	    from,
240 	    sctp_clog.x.misc.log1,
241 	    sctp_clog.x.misc.log2,
242 	    sctp_clog.x.misc.log3,
243 	    sctp_clog.x.misc.log4);
244 }
245 
246 void
247 sctp_log_mbc(struct mbuf *m, int from)
248 {
249 	struct mbuf *mat;
250 
251 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
252 		sctp_log_mb(mat, from);
253 	}
254 }
255 
256 #endif
257 
258 void
259 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
260 {
261 	struct sctp_cwnd_log sctp_clog;
262 
263 	if (control == NULL) {
264 		SCTP_PRINTF("Gak log of NULL?\n");
265 		return;
266 	}
267 	sctp_clog.x.strlog.stcb = control->stcb;
268 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
269 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
270 	sctp_clog.x.strlog.strm = control->sinfo_stream;
271 	if (poschk != NULL) {
272 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
273 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
274 	} else {
275 		sctp_clog.x.strlog.e_tsn = 0;
276 		sctp_clog.x.strlog.e_sseq = 0;
277 	}
278 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
279 	    SCTP_LOG_EVENT_STRM,
280 	    from,
281 	    sctp_clog.x.misc.log1,
282 	    sctp_clog.x.misc.log2,
283 	    sctp_clog.x.misc.log3,
284 	    sctp_clog.x.misc.log4);
285 }
286 
287 void
288 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
289 {
290 	struct sctp_cwnd_log sctp_clog;
291 
292 	sctp_clog.x.cwnd.net = net;
293 	if (stcb->asoc.send_queue_cnt > 255)
294 		sctp_clog.x.cwnd.cnt_in_send = 255;
295 	else
296 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
297 	if (stcb->asoc.stream_queue_cnt > 255)
298 		sctp_clog.x.cwnd.cnt_in_str = 255;
299 	else
300 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
301 
302 	if (net) {
303 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
304 		sctp_clog.x.cwnd.inflight = net->flight_size;
305 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
306 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
307 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
308 	}
309 	if (SCTP_CWNDLOG_PRESEND == from) {
310 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
311 	}
312 	sctp_clog.x.cwnd.cwnd_augment = augment;
313 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
314 	    SCTP_LOG_EVENT_CWND,
315 	    from,
316 	    sctp_clog.x.misc.log1,
317 	    sctp_clog.x.misc.log2,
318 	    sctp_clog.x.misc.log3,
319 	    sctp_clog.x.misc.log4);
320 }
321 
/*
 * Snapshot the ownership state of the main SCTP locks (socket, inp,
 * tcb, create, global endpoint-info, and the socket buffer locks) into
 * one trace record; "from" tags the call site.  Missing objects are
 * logged as SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx; on FreeBSD the socket lock is the receive
		 * buffer's mutex, so this is presumably intentional -- confirm.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
365 
/*
 * Log a max-burst limiting event (the error/burst values plus the
 * clamped queue occupancy counters).
 *
 * NOTE(review): unlike sctp_log_cwnd(), "net" is dereferenced without a
 * NULL check (net->flight_size), so callers must pass a valid net --
 * confirm all call sites do.
 */
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
392 
393 void
394 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
395 {
396 	struct sctp_cwnd_log sctp_clog;
397 
398 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
399 	sctp_clog.x.rwnd.send_size = snd_size;
400 	sctp_clog.x.rwnd.overhead = overhead;
401 	sctp_clog.x.rwnd.new_rwnd = 0;
402 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
403 	    SCTP_LOG_EVENT_RWND,
404 	    from,
405 	    sctp_clog.x.misc.log1,
406 	    sctp_clog.x.misc.log2,
407 	    sctp_clog.x.misc.log3,
408 	    sctp_clog.x.misc.log4);
409 }
410 
411 void
412 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
413 {
414 	struct sctp_cwnd_log sctp_clog;
415 
416 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
417 	sctp_clog.x.rwnd.send_size = flight_size;
418 	sctp_clog.x.rwnd.overhead = overhead;
419 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
420 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
421 	    SCTP_LOG_EVENT_RWND,
422 	    from,
423 	    sctp_clog.x.misc.log1,
424 	    sctp_clog.x.misc.log2,
425 	    sctp_clog.x.misc.log3,
426 	    sctp_clog.x.misc.log4);
427 }
428 
429 #ifdef SCTP_MBCNT_LOGGING
430 static void
431 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
432 {
433 	struct sctp_cwnd_log sctp_clog;
434 
435 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
436 	sctp_clog.x.mbcnt.size_change = book;
437 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
438 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
439 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
440 	    SCTP_LOG_EVENT_MBCNT,
441 	    from,
442 	    sctp_clog.x.misc.log1,
443 	    sctp_clog.x.misc.log2,
444 	    sctp_clog.x.misc.log3,
445 	    sctp_clog.x.misc.log4);
446 }
447 
448 #endif
449 
/*
 * Log four caller-supplied words as a generic "misc" trace event;
 * "from" tags the call site and gives the words their meaning.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
458 
459 void
460 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
461 {
462 	struct sctp_cwnd_log sctp_clog;
463 
464 	sctp_clog.x.wake.stcb = (void *)stcb;
465 	sctp_clog.x.wake.wake_cnt = wake_cnt;
466 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
467 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
468 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
469 
470 	if (stcb->asoc.stream_queue_cnt < 0xff)
471 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
472 	else
473 		sctp_clog.x.wake.stream_qcnt = 0xff;
474 
475 	if (stcb->asoc.chunks_on_out_queue < 0xff)
476 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
477 	else
478 		sctp_clog.x.wake.chunks_on_oque = 0xff;
479 
480 	sctp_clog.x.wake.sctpflags = 0;
481 	/* set in the defered mode stuff */
482 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
483 		sctp_clog.x.wake.sctpflags |= 1;
484 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
485 		sctp_clog.x.wake.sctpflags |= 2;
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
487 		sctp_clog.x.wake.sctpflags |= 4;
488 	/* what about the sb */
489 	if (stcb->sctp_socket) {
490 		struct socket *so = stcb->sctp_socket;
491 
492 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
493 	} else {
494 		sctp_clog.x.wake.sbflags = 0xff;
495 	}
496 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
497 	    SCTP_LOG_EVENT_WAKE,
498 	    from,
499 	    sctp_clog.x.misc.log1,
500 	    sctp_clog.x.misc.log2,
501 	    sctp_clog.x.misc.log3,
502 	    sctp_clog.x.misc.log4);
503 }
504 
505 void
506 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
507 {
508 	struct sctp_cwnd_log sctp_clog;
509 
510 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
511 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
512 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
513 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
514 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
515 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
516 	sctp_clog.x.blk.sndlen = sendlen;
517 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
518 	    SCTP_LOG_EVENT_BLOCK,
519 	    from,
520 	    sctp_clog.x.misc.log1,
521 	    sctp_clog.x.misc.log2,
522 	    sctp_clog.x.misc.log3,
523 	    sctp_clog.x.misc.log4);
524 }
525 
/*
 * Stub for the stat-log socket option: trace records are retrieved via
 * ktrdump instead, so there is nothing to copy out.  Always returns 0.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
532 
533 #ifdef SCTP_AUDITING_ENABLED
534 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
535 static int sctp_audit_indx = 0;
536 
537 static
538 void
539 sctp_print_audit_report(void)
540 {
541 	int i;
542 	int cnt;
543 
544 	cnt = 0;
545 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
546 		if ((sctp_audit_data[i][0] == 0xe0) &&
547 		    (sctp_audit_data[i][1] == 0x01)) {
548 			cnt = 0;
549 			SCTP_PRINTF("\n");
550 		} else if (sctp_audit_data[i][0] == 0xf0) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
554 		    (sctp_audit_data[i][1] == 0x01)) {
555 			SCTP_PRINTF("\n");
556 			cnt = 0;
557 		}
558 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
559 		    (uint32_t) sctp_audit_data[i][1]);
560 		cnt++;
561 		if ((cnt % 14) == 0)
562 			SCTP_PRINTF("\n");
563 	}
564 	for (i = 0; i < sctp_audit_indx; i++) {
565 		if ((sctp_audit_data[i][0] == 0xe0) &&
566 		    (sctp_audit_data[i][1] == 0x01)) {
567 			cnt = 0;
568 			SCTP_PRINTF("\n");
569 		} else if (sctp_audit_data[i][0] == 0xf0) {
570 			cnt = 0;
571 			SCTP_PRINTF("\n");
572 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
573 		    (sctp_audit_data[i][1] == 0x01)) {
574 			SCTP_PRINTF("\n");
575 			cnt = 0;
576 		}
577 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
578 		    (uint32_t) sctp_audit_data[i][1]);
579 		cnt++;
580 		if ((cnt % 14) == 0)
581 			SCTP_PRINTF("\n");
582 	}
583 	SCTP_PRINTF("\n");
584 }
585 
586 void
587 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
588     struct sctp_nets *net)
589 {
590 	int resend_cnt, tot_out, rep, tot_book_cnt;
591 	struct sctp_nets *lnet;
592 	struct sctp_tmit_chunk *chk;
593 
594 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
595 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
596 	sctp_audit_indx++;
597 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
598 		sctp_audit_indx = 0;
599 	}
600 	if (inp == NULL) {
601 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
602 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
603 		sctp_audit_indx++;
604 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
605 			sctp_audit_indx = 0;
606 		}
607 		return;
608 	}
609 	if (stcb == NULL) {
610 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
611 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
612 		sctp_audit_indx++;
613 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
614 			sctp_audit_indx = 0;
615 		}
616 		return;
617 	}
618 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
619 	sctp_audit_data[sctp_audit_indx][1] =
620 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
621 	sctp_audit_indx++;
622 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
623 		sctp_audit_indx = 0;
624 	}
625 	rep = 0;
626 	tot_book_cnt = 0;
627 	resend_cnt = tot_out = 0;
628 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
629 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
630 			resend_cnt++;
631 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
632 			tot_out += chk->book_size;
633 			tot_book_cnt++;
634 		}
635 	}
636 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
637 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
638 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
639 		sctp_audit_indx++;
640 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
641 			sctp_audit_indx = 0;
642 		}
643 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
644 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
645 		rep = 1;
646 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
647 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
648 		sctp_audit_data[sctp_audit_indx][1] =
649 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
650 		sctp_audit_indx++;
651 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
652 			sctp_audit_indx = 0;
653 		}
654 	}
655 	if (tot_out != stcb->asoc.total_flight) {
656 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
657 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
658 		sctp_audit_indx++;
659 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
660 			sctp_audit_indx = 0;
661 		}
662 		rep = 1;
663 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
664 		    (int)stcb->asoc.total_flight);
665 		stcb->asoc.total_flight = tot_out;
666 	}
667 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
668 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
669 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
670 		sctp_audit_indx++;
671 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
672 			sctp_audit_indx = 0;
673 		}
674 		rep = 1;
675 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
676 
677 		stcb->asoc.total_flight_count = tot_book_cnt;
678 	}
679 	tot_out = 0;
680 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
681 		tot_out += lnet->flight_size;
682 	}
683 	if (tot_out != stcb->asoc.total_flight) {
684 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
685 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
686 		sctp_audit_indx++;
687 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
688 			sctp_audit_indx = 0;
689 		}
690 		rep = 1;
691 		SCTP_PRINTF("real flight:%d net total was %d\n",
692 		    stcb->asoc.total_flight, tot_out);
693 		/* now corrective action */
694 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
695 
696 			tot_out = 0;
697 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
698 				if ((chk->whoTo == lnet) &&
699 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
700 					tot_out += chk->book_size;
701 				}
702 			}
703 			if (lnet->flight_size != tot_out) {
704 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
705 				    (void *)lnet, lnet->flight_size,
706 				    tot_out);
707 				lnet->flight_size = tot_out;
708 			}
709 		}
710 	}
711 	if (rep) {
712 		sctp_print_audit_report();
713 	}
714 }
715 
716 void
717 sctp_audit_log(uint8_t ev, uint8_t fd)
718 {
719 
720 	sctp_audit_data[sctp_audit_indx][0] = ev;
721 	sctp_audit_data[sctp_audit_indx][1] = fd;
722 	sctp_audit_indx++;
723 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
724 		sctp_audit_indx = 0;
725 	}
726 }
727 
728 #endif
729 
730 /*
731  * sctp_stop_timers_for_shutdown() should be called
732  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
733  * state to make sure that all timers are stopped.
734  */
735 void
736 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
737 {
738 	struct sctp_association *asoc;
739 	struct sctp_nets *net;
740 
741 	asoc = &stcb->asoc;
742 
743 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
744 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
745 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
746 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
747 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
748 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
749 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
750 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
751 	}
752 }
753 
754 /*
755  * a list of sizes based on typical mtu's, used only if next hop size not
756  * returned.
757  */
/*
 * Table of typical link MTU values, in ascending order (the lookup
 * routines below depend on the ordering); used only when the next-hop
 * MTU is not available.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Return the largest table MTU strictly smaller than val, or val
 * itself when val does not exceed the smallest entry.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t idx;

	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	idx = 1;
	while ((idx < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t))) &&
	    (sctp_mtu_sizes[idx] < val)) {
		idx++;
	}
	return (sctp_mtu_sizes[idx - 1]);
}

/*
 * Return the smallest table MTU strictly larger than val, or val
 * itself when no entry is larger.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	uint32_t idx;

	for (idx = 0; idx < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); idx++) {
		if (sctp_mtu_sizes[idx] > val) {
			return (sctp_mtu_sizes[idx]);
		}
	}
	return (val);
}
816 
/*
 * Refill the endpoint's random store by hashing the endpoint's random
 * numbers together with a monotonically increasing counter, and reset
 * store_at so consumers start reading from the beginning again.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
835 
/*
 * Hand out the next 32-bit value from the endpoint's random store;
 * used for initial TSNs and as raw material for verification tags.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	/* Debug hook: a nonzero seed yields a simple increasing sequence. */
	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	/*
	 * Reserve a 4-byte slot lock-free: advance store_at with a
	 * compare-and-swap (wrapping before the store would overrun the
	 * signature-sized buffer) and retry if another thread raced us.
	 */
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
873 
874 uint32_t
875 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
876 {
877 	uint32_t x;
878 	struct timeval now;
879 
880 	if (check) {
881 		(void)SCTP_GETTIME_TIMEVAL(&now);
882 	}
883 	for (;;) {
884 		x = sctp_select_initial_TSN(&inp->sctp_ep);
885 		if (x == 0) {
886 			/* we never use 0 */
887 			continue;
888 		}
889 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
890 			break;
891 		}
892 	}
893 	return (x);
894 }
895 
896 int
897 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
898     uint32_t override_tag, uint32_t vrf_id)
899 {
900 	struct sctp_association *asoc;
901 
902 	/*
903 	 * Anything set to zero is taken care of by the allocation routine's
904 	 * bzero
905 	 */
906 
907 	/*
908 	 * Up front select what scoping to apply on addresses I tell my peer
909 	 * Not sure what to do with these right now, we will need to come up
910 	 * with a way to set them. We may need to pass them through from the
911 	 * caller in the sctp_aloc_assoc() function.
912 	 */
913 	int i;
914 
915 #if defined(SCTP_DETAILED_STR_STATS)
916 	int j;
917 
918 #endif
919 
920 	asoc = &stcb->asoc;
921 	/* init all variables to a known value. */
922 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
923 	asoc->max_burst = inp->sctp_ep.max_burst;
924 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
925 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
926 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
927 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
928 	asoc->ecn_supported = inp->ecn_supported;
929 	asoc->prsctp_supported = inp->prsctp_supported;
930 	asoc->auth_supported = inp->auth_supported;
931 	asoc->asconf_supported = inp->asconf_supported;
932 	asoc->reconfig_supported = inp->reconfig_supported;
933 	asoc->nrsack_supported = inp->nrsack_supported;
934 	asoc->pktdrop_supported = inp->pktdrop_supported;
935 	asoc->sctp_cmt_pf = (uint8_t) 0;
936 	asoc->sctp_frag_point = inp->sctp_frag_point;
937 	asoc->sctp_features = inp->sctp_features;
938 	asoc->default_dscp = inp->sctp_ep.default_dscp;
939 	asoc->max_cwnd = inp->max_cwnd;
940 #ifdef INET6
941 	if (inp->sctp_ep.default_flowlabel) {
942 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
943 	} else {
944 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
945 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
946 			asoc->default_flowlabel &= 0x000fffff;
947 			asoc->default_flowlabel |= 0x80000000;
948 		} else {
949 			asoc->default_flowlabel = 0;
950 		}
951 	}
952 #endif
953 	asoc->sb_send_resv = 0;
954 	if (override_tag) {
955 		asoc->my_vtag = override_tag;
956 	} else {
957 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
958 	}
959 	/* Get the nonce tags */
960 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
961 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
962 	asoc->vrf_id = vrf_id;
963 
964 #ifdef SCTP_ASOCLOG_OF_TSNS
965 	asoc->tsn_in_at = 0;
966 	asoc->tsn_out_at = 0;
967 	asoc->tsn_in_wrapped = 0;
968 	asoc->tsn_out_wrapped = 0;
969 	asoc->cumack_log_at = 0;
970 	asoc->cumack_log_atsnt = 0;
971 #endif
972 #ifdef SCTP_FS_SPEC_LOG
973 	asoc->fs_index = 0;
974 #endif
975 	asoc->refcnt = 0;
976 	asoc->assoc_up_sent = 0;
977 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
978 	    sctp_select_initial_TSN(&inp->sctp_ep);
979 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
980 	/* we are optimisitic here */
981 	asoc->peer_supports_nat = 0;
982 	asoc->sent_queue_retran_cnt = 0;
983 
984 	/* for CMT */
985 	asoc->last_net_cmt_send_started = NULL;
986 
987 	/* This will need to be adjusted */
988 	asoc->last_acked_seq = asoc->init_seq_number - 1;
989 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
990 	asoc->asconf_seq_in = asoc->last_acked_seq;
991 
992 	/* here we are different, we hold the next one we expect */
993 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
994 
995 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
996 	asoc->initial_rto = inp->sctp_ep.initial_rto;
997 
998 	asoc->max_init_times = inp->sctp_ep.max_init_times;
999 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1000 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1001 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1002 	asoc->free_chunk_cnt = 0;
1003 
1004 	asoc->iam_blocking = 0;
1005 	asoc->context = inp->sctp_context;
1006 	asoc->local_strreset_support = inp->local_strreset_support;
1007 	asoc->def_send = inp->def_send;
1008 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1009 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1010 	asoc->pr_sctp_cnt = 0;
1011 	asoc->total_output_queue_size = 0;
1012 
1013 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1014 		asoc->scope.ipv6_addr_legal = 1;
1015 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1016 			asoc->scope.ipv4_addr_legal = 1;
1017 		} else {
1018 			asoc->scope.ipv4_addr_legal = 0;
1019 		}
1020 	} else {
1021 		asoc->scope.ipv6_addr_legal = 0;
1022 		asoc->scope.ipv4_addr_legal = 1;
1023 	}
1024 
1025 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1026 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1027 
1028 	asoc->smallest_mtu = inp->sctp_frag_point;
1029 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1030 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1031 
1032 	asoc->locked_on_sending = NULL;
1033 	asoc->stream_locked_on = 0;
1034 	asoc->ecn_echo_cnt_onq = 0;
1035 	asoc->stream_locked = 0;
1036 
1037 	asoc->send_sack = 1;
1038 
1039 	LIST_INIT(&asoc->sctp_restricted_addrs);
1040 
1041 	TAILQ_INIT(&asoc->nets);
1042 	TAILQ_INIT(&asoc->pending_reply_queue);
1043 	TAILQ_INIT(&asoc->asconf_ack_sent);
1044 	/* Setup to fill the hb random cache at first HB */
1045 	asoc->hb_random_idx = 4;
1046 
1047 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1048 
1049 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1050 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1051 
1052 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1053 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1054 
1055 	/*
1056 	 * Now the stream parameters, here we allocate space for all streams
1057 	 * that we request by default.
1058 	 */
1059 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1060 	    inp->sctp_ep.pre_open_stream_count;
1061 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1062 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1063 	    SCTP_M_STRMO);
1064 	if (asoc->strmout == NULL) {
1065 		/* big trouble no memory */
1066 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1067 		return (ENOMEM);
1068 	}
1069 	for (i = 0; i < asoc->streamoutcnt; i++) {
1070 		/*
1071 		 * inbound side must be set to 0xffff, also NOTE when we get
1072 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1073 		 * count (streamoutcnt) but first check if we sent to any of
1074 		 * the upper streams that were dropped (if some were). Those
1075 		 * that were dropped must be notified to the upper layer as
1076 		 * failed to send.
1077 		 */
1078 		asoc->strmout[i].next_sequence_send = 0x0;
1079 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1080 		asoc->strmout[i].chunks_on_queues = 0;
1081 #if defined(SCTP_DETAILED_STR_STATS)
1082 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1083 			asoc->strmout[i].abandoned_sent[j] = 0;
1084 			asoc->strmout[i].abandoned_unsent[j] = 0;
1085 		}
1086 #else
1087 		asoc->strmout[i].abandoned_sent[0] = 0;
1088 		asoc->strmout[i].abandoned_unsent[0] = 0;
1089 #endif
1090 		asoc->strmout[i].stream_no = i;
1091 		asoc->strmout[i].last_msg_incomplete = 0;
1092 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1093 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1094 	}
1095 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1096 
1097 	/* Now the mapping array */
1098 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1099 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1100 	    SCTP_M_MAP);
1101 	if (asoc->mapping_array == NULL) {
1102 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1103 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1104 		return (ENOMEM);
1105 	}
1106 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1107 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1108 	    SCTP_M_MAP);
1109 	if (asoc->nr_mapping_array == NULL) {
1110 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1111 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1112 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1113 		return (ENOMEM);
1114 	}
1115 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1116 
1117 	/* Now the init of the other outqueues */
1118 	TAILQ_INIT(&asoc->free_chunks);
1119 	TAILQ_INIT(&asoc->control_send_queue);
1120 	TAILQ_INIT(&asoc->asconf_send_queue);
1121 	TAILQ_INIT(&asoc->send_queue);
1122 	TAILQ_INIT(&asoc->sent_queue);
1123 	TAILQ_INIT(&asoc->reasmqueue);
1124 	TAILQ_INIT(&asoc->resetHead);
1125 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1126 	TAILQ_INIT(&asoc->asconf_queue);
1127 	/* authentication fields */
1128 	asoc->authinfo.random = NULL;
1129 	asoc->authinfo.active_keyid = 0;
1130 	asoc->authinfo.assoc_key = NULL;
1131 	asoc->authinfo.assoc_keyid = 0;
1132 	asoc->authinfo.recv_key = NULL;
1133 	asoc->authinfo.recv_keyid = 0;
1134 	LIST_INIT(&asoc->shared_keys);
1135 	asoc->marked_retrans = 0;
1136 	asoc->port = inp->sctp_ep.port;
1137 	asoc->timoinit = 0;
1138 	asoc->timodata = 0;
1139 	asoc->timosack = 0;
1140 	asoc->timoshutdown = 0;
1141 	asoc->timoheartbeat = 0;
1142 	asoc->timocookie = 0;
1143 	asoc->timoshutdownack = 0;
1144 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1145 	asoc->discontinuity_time = asoc->start_time;
1146 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1147 		asoc->abandoned_unsent[i] = 0;
1148 		asoc->abandoned_sent[i] = 0;
1149 	}
1150 	/*
1151 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1152 	 * freed later when the association is freed.
1153 	 */
1154 	return (0);
1155 }
1156 
1157 void
1158 sctp_print_mapping_array(struct sctp_association *asoc)
1159 {
1160 	unsigned int i, limit;
1161 
1162 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1163 	    asoc->mapping_array_size,
1164 	    asoc->mapping_array_base_tsn,
1165 	    asoc->cumulative_tsn,
1166 	    asoc->highest_tsn_inside_map,
1167 	    asoc->highest_tsn_inside_nr_map);
1168 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1169 		if (asoc->mapping_array[limit - 1] != 0) {
1170 			break;
1171 		}
1172 	}
1173 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1174 	for (i = 0; i < limit; i++) {
1175 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1176 	}
1177 	if (limit % 16)
1178 		SCTP_PRINTF("\n");
1179 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1180 		if (asoc->nr_mapping_array[limit - 1]) {
1181 			break;
1182 		}
1183 	}
1184 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1185 	for (i = 0; i < limit; i++) {
1186 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1187 	}
1188 	if (limit % 16)
1189 		SCTP_PRINTF("\n");
1190 }
1191 
1192 int
1193 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1194 {
1195 	/* mapping array needs to grow */
1196 	uint8_t *new_array1, *new_array2;
1197 	uint32_t new_size;
1198 
1199 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1200 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1201 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1202 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1203 		/* can't get more, forget it */
1204 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1205 		if (new_array1) {
1206 			SCTP_FREE(new_array1, SCTP_M_MAP);
1207 		}
1208 		if (new_array2) {
1209 			SCTP_FREE(new_array2, SCTP_M_MAP);
1210 		}
1211 		return (-1);
1212 	}
1213 	memset(new_array1, 0, new_size);
1214 	memset(new_array2, 0, new_size);
1215 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1216 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1217 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1218 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1219 	asoc->mapping_array = new_array1;
1220 	asoc->nr_mapping_array = new_array2;
1221 	asoc->mapping_array_size = new_size;
1222 	return (0);
1223 }
1224 
1225 
/*
 * Core of the association iterator: walk endpoints (and each endpoint's
 * associations), invoking the caller-supplied callbacks.  Runs with the
 * INP-INFO read lock and the iterator lock held; both are periodically
 * dropped (after SCTP_ITERATOR_MAX_AT_ONCE associations) to let other
 * threads make progress.  The iterator structure "it" is freed here when
 * the walk completes.  Lock ordering and the refcount dance below are
 * deliberate — do not reorder.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* Drop the reference taken when the iterator was queued. */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		/* The iterator request itself is owned (and freed) here. */
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* it->inp is already read-locked from above. */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	/* Skip endpoints whose flags/features don't match the filter. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Endpoint callback asked to skip, or no associations. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Pin the current stcb/inp with refcounts, drop all
			 * locks, then reacquire them in the standard order
			 * (INFO -> ITERATOR -> INP -> TCB).
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1373 
1374 void
1375 sctp_iterator_worker(void)
1376 {
1377 	struct sctp_iterator *it, *nit;
1378 
1379 	/* This function is called with the WQ lock in place */
1380 
1381 	sctp_it_ctl.iterator_running = 1;
1382 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1383 		sctp_it_ctl.cur_it = it;
1384 		/* now lets work on this one */
1385 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1386 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1387 		CURVNET_SET(it->vn);
1388 		sctp_iterator_work(it);
1389 		sctp_it_ctl.cur_it = NULL;
1390 		CURVNET_RESTORE();
1391 		SCTP_IPI_ITERATOR_WQ_LOCK();
1392 		/* sa_ignore FREED_MEMORY */
1393 	}
1394 	sctp_it_ctl.iterator_running = 0;
1395 	return;
1396 }
1397 
1398 
1399 static void
1400 sctp_handle_addr_wq(void)
1401 {
1402 	/* deal with the ADDR wq from the rtsock calls */
1403 	struct sctp_laddr *wi, *nwi;
1404 	struct sctp_asconf_iterator *asc;
1405 
1406 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1407 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1408 	if (asc == NULL) {
1409 		/* Try later, no memory */
1410 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1411 		    (struct sctp_inpcb *)NULL,
1412 		    (struct sctp_tcb *)NULL,
1413 		    (struct sctp_nets *)NULL);
1414 		return;
1415 	}
1416 	LIST_INIT(&asc->list_of_work);
1417 	asc->cnt = 0;
1418 
1419 	SCTP_WQ_ADDR_LOCK();
1420 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1421 		LIST_REMOVE(wi, sctp_nxt_addr);
1422 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1423 		asc->cnt++;
1424 	}
1425 	SCTP_WQ_ADDR_UNLOCK();
1426 
1427 	if (asc->cnt == 0) {
1428 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1429 	} else {
1430 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1431 		    sctp_asconf_iterator_stcb,
1432 		    NULL,	/* No ep end for boundall */
1433 		    SCTP_PCB_FLAGS_BOUNDALL,
1434 		    SCTP_PCB_ANY_FEATURES,
1435 		    SCTP_ASOC_ANY_STATE,
1436 		    (void *)asc, 0,
1437 		    sctp_asconf_iterator_end, NULL, 0);
1438 	}
1439 }
1440 
/*
 * Central callout handler for every SCTP timer type.  "t" is the
 * struct sctp_timer embedded in the endpoint/association/net it was
 * armed for.  The function validates the timer, pins the inp/stcb with
 * references, locks the TCB, dispatches on tmr->type, and then releases
 * everything via the get_out / out_decr / out_no_decr exit ladder.
 * tmr->stopped_from is a breadcrumb recording how far the sanity checks
 * progressed (0xa001..0xa006), then the timer type once dispatched.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/* Self-pointer mismatch: the timer struct is stale. */
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* ADDR_WQ is the only timer type allowed to have no endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * A socket-less endpoint is being torn down; only the
		 * timer types listed below may still run against it.
		 */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the association before looking at its state. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped while this callout was in flight. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		/* ASOCKILL must still run even on a dying association. */
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/* Only timer type that requires a valid net as well. */
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* Periodic rotation of the endpoint's cookie secret keys. */
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Shutdown guard timer expired");
		/* Abort frees the association; skip the TCB unlock path. */
		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
	/* Exit ladder: unlock TCB, then drop the inp reference. */
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    tmr->type);
	CURVNET_RESTORE();
}
1885 
1886 void
1887 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1888     struct sctp_nets *net)
1889 {
1890 	uint32_t to_ticks;
1891 	struct sctp_timer *tmr;
1892 
1893 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1894 		return;
1895 
1896 	tmr = NULL;
1897 	if (stcb) {
1898 		SCTP_TCB_LOCK_ASSERT(stcb);
1899 	}
1900 	switch (t_type) {
1901 	case SCTP_TIMER_TYPE_ZERO_COPY:
1902 		tmr = &inp->sctp_ep.zero_copy_timer;
1903 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1904 		break;
1905 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1906 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1907 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1908 		break;
1909 	case SCTP_TIMER_TYPE_ADDR_WQ:
1910 		/* Only 1 tick away :-) */
1911 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1912 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1913 		break;
1914 	case SCTP_TIMER_TYPE_SEND:
1915 		/* Here we use the RTO timer */
1916 		{
1917 			int rto_val;
1918 
1919 			if ((stcb == NULL) || (net == NULL)) {
1920 				return;
1921 			}
1922 			tmr = &net->rxt_timer;
1923 			if (net->RTO == 0) {
1924 				rto_val = stcb->asoc.initial_rto;
1925 			} else {
1926 				rto_val = net->RTO;
1927 			}
1928 			to_ticks = MSEC_TO_TICKS(rto_val);
1929 		}
1930 		break;
1931 	case SCTP_TIMER_TYPE_INIT:
1932 		/*
1933 		 * Here we use the INIT timer default usually about 1
1934 		 * minute.
1935 		 */
1936 		if ((stcb == NULL) || (net == NULL)) {
1937 			return;
1938 		}
1939 		tmr = &net->rxt_timer;
1940 		if (net->RTO == 0) {
1941 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1942 		} else {
1943 			to_ticks = MSEC_TO_TICKS(net->RTO);
1944 		}
1945 		break;
1946 	case SCTP_TIMER_TYPE_RECV:
1947 		/*
1948 		 * Here we use the Delayed-Ack timer value from the inp
1949 		 * ususually about 200ms.
1950 		 */
1951 		if (stcb == NULL) {
1952 			return;
1953 		}
1954 		tmr = &stcb->asoc.dack_timer;
1955 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1956 		break;
1957 	case SCTP_TIMER_TYPE_SHUTDOWN:
1958 		/* Here we use the RTO of the destination. */
1959 		if ((stcb == NULL) || (net == NULL)) {
1960 			return;
1961 		}
1962 		if (net->RTO == 0) {
1963 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1964 		} else {
1965 			to_ticks = MSEC_TO_TICKS(net->RTO);
1966 		}
1967 		tmr = &net->rxt_timer;
1968 		break;
1969 	case SCTP_TIMER_TYPE_HEARTBEAT:
1970 		/*
1971 		 * the net is used here so that we can add in the RTO. Even
1972 		 * though we use a different timer. We also add the HB timer
1973 		 * PLUS a random jitter.
1974 		 */
1975 		if ((stcb == NULL) || (net == NULL)) {
1976 			return;
1977 		} else {
1978 			uint32_t rndval;
1979 			uint32_t jitter;
1980 
1981 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1982 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1983 				return;
1984 			}
1985 			if (net->RTO == 0) {
1986 				to_ticks = stcb->asoc.initial_rto;
1987 			} else {
1988 				to_ticks = net->RTO;
1989 			}
1990 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1991 			jitter = rndval % to_ticks;
1992 			if (jitter >= (to_ticks >> 1)) {
1993 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1994 			} else {
1995 				to_ticks = to_ticks - jitter;
1996 			}
1997 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1998 			    !(net->dest_state & SCTP_ADDR_PF)) {
1999 				to_ticks += net->heart_beat_delay;
2000 			}
2001 			/*
2002 			 * Now we must convert the to_ticks that are now in
2003 			 * ms to ticks.
2004 			 */
2005 			to_ticks = MSEC_TO_TICKS(to_ticks);
2006 			tmr = &net->hb_timer;
2007 		}
2008 		break;
2009 	case SCTP_TIMER_TYPE_COOKIE:
2010 		/*
2011 		 * Here we can use the RTO timer from the network since one
2012 		 * RTT was compelete. If a retran happened then we will be
2013 		 * using the RTO initial value.
2014 		 */
2015 		if ((stcb == NULL) || (net == NULL)) {
2016 			return;
2017 		}
2018 		if (net->RTO == 0) {
2019 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2020 		} else {
2021 			to_ticks = MSEC_TO_TICKS(net->RTO);
2022 		}
2023 		tmr = &net->rxt_timer;
2024 		break;
2025 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2026 		/*
2027 		 * nothing needed but the endpoint here ususually about 60
2028 		 * minutes.
2029 		 */
2030 		tmr = &inp->sctp_ep.signature_change;
2031 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2032 		break;
2033 	case SCTP_TIMER_TYPE_ASOCKILL:
2034 		if (stcb == NULL) {
2035 			return;
2036 		}
2037 		tmr = &stcb->asoc.strreset_timer;
2038 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2039 		break;
2040 	case SCTP_TIMER_TYPE_INPKILL:
2041 		/*
2042 		 * The inp is setup to die. We re-use the signature_chage
2043 		 * timer since that has stopped and we are in the GONE
2044 		 * state.
2045 		 */
2046 		tmr = &inp->sctp_ep.signature_change;
2047 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2048 		break;
2049 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2050 		/*
2051 		 * Here we use the value found in the EP for PMTU ususually
2052 		 * about 10 minutes.
2053 		 */
2054 		if ((stcb == NULL) || (net == NULL)) {
2055 			return;
2056 		}
2057 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2058 			return;
2059 		}
2060 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2061 		tmr = &net->pmtu_timer;
2062 		break;
2063 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2064 		/* Here we use the RTO of the destination */
2065 		if ((stcb == NULL) || (net == NULL)) {
2066 			return;
2067 		}
2068 		if (net->RTO == 0) {
2069 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2070 		} else {
2071 			to_ticks = MSEC_TO_TICKS(net->RTO);
2072 		}
2073 		tmr = &net->rxt_timer;
2074 		break;
2075 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2076 		/*
2077 		 * Here we use the endpoints shutdown guard timer usually
2078 		 * about 3 minutes.
2079 		 */
2080 		if (stcb == NULL) {
2081 			return;
2082 		}
2083 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2084 		tmr = &stcb->asoc.shut_guard_timer;
2085 		break;
2086 	case SCTP_TIMER_TYPE_STRRESET:
2087 		/*
2088 		 * Here the timer comes from the stcb but its value is from
2089 		 * the net's RTO.
2090 		 */
2091 		if ((stcb == NULL) || (net == NULL)) {
2092 			return;
2093 		}
2094 		if (net->RTO == 0) {
2095 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2096 		} else {
2097 			to_ticks = MSEC_TO_TICKS(net->RTO);
2098 		}
2099 		tmr = &stcb->asoc.strreset_timer;
2100 		break;
2101 	case SCTP_TIMER_TYPE_ASCONF:
2102 		/*
2103 		 * Here the timer comes from the stcb but its value is from
2104 		 * the net's RTO.
2105 		 */
2106 		if ((stcb == NULL) || (net == NULL)) {
2107 			return;
2108 		}
2109 		if (net->RTO == 0) {
2110 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2111 		} else {
2112 			to_ticks = MSEC_TO_TICKS(net->RTO);
2113 		}
2114 		tmr = &stcb->asoc.asconf_timer;
2115 		break;
2116 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2117 		if ((stcb == NULL) || (net != NULL)) {
2118 			return;
2119 		}
2120 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2121 		tmr = &stcb->asoc.delete_prim_timer;
2122 		break;
2123 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2124 		if (stcb == NULL) {
2125 			return;
2126 		}
2127 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2128 			/*
2129 			 * Really an error since stcb is NOT set to
2130 			 * autoclose
2131 			 */
2132 			return;
2133 		}
2134 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2135 		tmr = &stcb->asoc.autoclose_timer;
2136 		break;
2137 	default:
2138 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2139 		    __FUNCTION__, t_type);
2140 		return;
2141 		break;
2142 	}
2143 	if ((to_ticks <= 0) || (tmr == NULL)) {
2144 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2145 		    __FUNCTION__, t_type, to_ticks, (void *)tmr);
2146 		return;
2147 	}
2148 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2149 		/*
2150 		 * we do NOT allow you to have it already running. if it is
2151 		 * we leave the current one up unchanged
2152 		 */
2153 		return;
2154 	}
2155 	/* At this point we can proceed */
2156 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2157 		stcb->asoc.num_send_timers_up++;
2158 	}
2159 	tmr->stopped_from = 0;
2160 	tmr->type = t_type;
2161 	tmr->ep = (void *)inp;
2162 	tmr->tcb = (void *)stcb;
2163 	tmr->net = (void *)net;
2164 	tmr->self = (void *)tmr;
2165 	tmr->vnet = (void *)curvnet;
2166 	tmr->ticks = sctp_get_tick_count();
2167 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2168 	return;
2169 }
2170 
/*
 * Stop the timer of type 't_type' associated with the given
 * endpoint/association/destination.  'from' identifies the caller
 * location and is recorded in tmr->stopped_from for debugging.
 * For timer types that live on the stcb or net, a NULL stcb/net means
 * there is nothing to stop and the call is a no-op.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer is independent of an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Map the timer type to the sctp_timer structure backing it. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the pending send-timer count in sync (clamped at zero). */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2327 
2328 uint32_t
2329 sctp_calculate_len(struct mbuf *m)
2330 {
2331 	uint32_t tlen = 0;
2332 	struct mbuf *at;
2333 
2334 	at = m;
2335 	while (at) {
2336 		tlen += SCTP_BUF_LEN(at);
2337 		at = SCTP_BUF_NEXT(at);
2338 	}
2339 	return (tlen);
2340 }
2341 
2342 void
2343 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2344     struct sctp_association *asoc, uint32_t mtu)
2345 {
2346 	/*
2347 	 * Reset the P-MTU size on this association, this involves changing
2348 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2349 	 * allow the DF flag to be cleared.
2350 	 */
2351 	struct sctp_tmit_chunk *chk;
2352 	unsigned int eff_mtu, ovh;
2353 
2354 	asoc->smallest_mtu = mtu;
2355 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2356 		ovh = SCTP_MIN_OVERHEAD;
2357 	} else {
2358 		ovh = SCTP_MIN_V4_OVERHEAD;
2359 	}
2360 	eff_mtu = mtu - ovh;
2361 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2362 		if (chk->send_size > eff_mtu) {
2363 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2364 		}
2365 	}
2366 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2367 		if (chk->send_size > eff_mtu) {
2368 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2369 		}
2370 	}
2371 }
2372 
2373 
2374 /*
2375  * given an association and starting time of the current RTT period return
2376  * RTO in number of msecs net should point to the current network
2377  */
2378 
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe, int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now, then, *old;

	/*
	 * Copy it out for sparc64: 'told' may be unaligned, so callers
	 * indicate via 'safe' whether a local aligned copy is needed.
	 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error: unknown 'safe' value */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* now = now - old: elapsed time since the RTT period started */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
	        (uint64_t) now.tv_usec;

	/* compute rtt in ms */
	rtt = (int32_t) (net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* Subsequent measurement: EWMA update of srtt and rttvar. */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	/* Never let the variance collapse to zero (clock granularity floor). */
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	/* RTO = srtt + rttvar */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		/* Leaving sat-network mode locks out re-entering it. */
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2494 
2495 /*
2496  * return a pointer to a contiguous piece of data from the given mbuf chain
2497  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2498  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2499  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2500  */
2501 caddr_t
2502 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2503 {
2504 	uint32_t count;
2505 	uint8_t *ptr;
2506 
2507 	ptr = in_ptr;
2508 	if ((off < 0) || (len <= 0))
2509 		return (NULL);
2510 
2511 	/* find the desired start location */
2512 	while ((m != NULL) && (off > 0)) {
2513 		if (off < SCTP_BUF_LEN(m))
2514 			break;
2515 		off -= SCTP_BUF_LEN(m);
2516 		m = SCTP_BUF_NEXT(m);
2517 	}
2518 	if (m == NULL)
2519 		return (NULL);
2520 
2521 	/* is the current mbuf large enough (eg. contiguous)? */
2522 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2523 		return (mtod(m, caddr_t)+off);
2524 	} else {
2525 		/* else, it spans more than one mbuf, so save a temp copy... */
2526 		while ((m != NULL) && (len > 0)) {
2527 			count = min(SCTP_BUF_LEN(m) - off, len);
2528 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2529 			len -= count;
2530 			ptr += count;
2531 			off = 0;
2532 			m = SCTP_BUF_NEXT(m);
2533 		}
2534 		if ((m == NULL) && (len > 0))
2535 			return (NULL);
2536 		else
2537 			return ((caddr_t)in_ptr);
2538 	}
2539 }
2540 
2541 
2542 
2543 struct sctp_paramhdr *
2544 sctp_get_next_param(struct mbuf *m,
2545     int offset,
2546     struct sctp_paramhdr *pull,
2547     int pull_limit)
2548 {
2549 	/* This just provides a typed signature to Peter's Pull routine */
2550 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2551 	    (uint8_t *) pull));
2552 }
2553 
2554 
2555 struct mbuf *
2556 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2557 {
2558 	struct mbuf *m_last;
2559 	caddr_t dp;
2560 
2561 	if (padlen > 3) {
2562 		return (NULL);
2563 	}
2564 	if (padlen <= M_TRAILINGSPACE(m)) {
2565 		/*
2566 		 * The easy way. We hope the majority of the time we hit
2567 		 * here :)
2568 		 */
2569 		m_last = m;
2570 	} else {
2571 		/* Hard way we must grow the mbuf chain */
2572 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2573 		if (m_last == NULL) {
2574 			return (NULL);
2575 		}
2576 		SCTP_BUF_LEN(m_last) = 0;
2577 		SCTP_BUF_NEXT(m_last) = NULL;
2578 		SCTP_BUF_NEXT(m) = m_last;
2579 	}
2580 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2581 	SCTP_BUF_LEN(m_last) += padlen;
2582 	memset(dp, 0, padlen);
2583 	return (m_last);
2584 }
2585 
2586 struct mbuf *
2587 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2588 {
2589 	/* find the last mbuf in chain and pad it */
2590 	struct mbuf *m_at;
2591 
2592 	if (last_mbuf != NULL) {
2593 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2594 	} else {
2595 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2596 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2597 				return (sctp_add_pad_tombuf(m_at, padval));
2598 			}
2599 		}
2600 	}
2601 	return (NULL);
2602 }
2603 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification to the application and,
 * for 1-to-1 style sockets on COMM_LOST/CANT_STR_ASSOC, set the
 * socket error and wake any sleepers.  For COMM_UP/RESTART the
 * notification carries the list of supported features; for
 * COMM_LOST/CANT_STR_ASSOC it carries the ABORT chunk (if any).
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	size_t notif_len, abort_len;
	unsigned int i;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
		} else {
			abort_len = 0;
		}
		/* Size the notification for the optional trailing info. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		/* Trailing info only when the larger allocation succeeded. */
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			/* not that we need this */
			control->tail_mbuf = m_notify;
			control->spec_flags = M_NOTIFICATION;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up and error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			/* Peer aborted: refused before setup, reset after. */
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			/* Local abort: timed out during setup, else aborted. */
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* Hold a ref across the lock juggle so the stcb survives. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore_locked(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2751 
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification carrying the new
 * 'state' and 'error' for the peer address 'sa'.  Silently returns if
 * the application has not enabled SCTP_PCB_FLAGS_RECVPADDREVNT or on
 * allocation failure.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address into spc_aaddr in the form the app expects. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		/* Report as a v4-mapped v6 address if the app asked for it. */
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2843 
2844 
/*
 * Deliver an SCTP_SEND_FAILED (old API) or SCTP_SEND_FAILED_EVENT
 * (new API) notification for the chunk 'chk' that could not be
 * delivered.  'sent' distinguishes data that was put on the wire from
 * data that never left.  The chunk's data mbufs are stolen (chk->data
 * is set to NULL) and appended to the notification so the application
 * gets the failed payload back.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* New-style event takes precedence when both features are on. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		/* Reported length covers header + payload minus chunk hdr. */
		length += chk->send_size;
		length -= sizeof(struct sctp_data_chunk);
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		/* Reported length covers header + payload minus chunk hdr. */
		length += chk->send_size;
		length -= sizeof(struct sctp_data_chunk);
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2958 
2959 
/*
 * Deliver an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification for a
 * message still sitting, un-chunked, on the stream output queue.  The queued
 * user data (sp->data) is chained onto the notification mbuf and handed to
 * the application through the socket's read queue; ownership of sp->data is
 * transferred (it is NULLed out here).  Which of the two wire formats is
 * built depends on which of the two event flags the application enabled.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* Size the notification for whichever format the app subscribed to. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* Newer sctp_send_failed_event format. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		/* Advertised length includes the user data chained below. */
		length += sp->length;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->stream;
		if (sp->some_taken) {
			/* part of the message was already pulled into chunks */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		/* Deprecated sctp_send_failed format. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		length += sp->length;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3060 
3061 
3062 
3063 static void
3064 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3065 {
3066 	struct mbuf *m_notify;
3067 	struct sctp_adaptation_event *sai;
3068 	struct sctp_queued_to_read *control;
3069 
3070 	if ((stcb == NULL) ||
3071 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3072 		/* event not enabled */
3073 		return;
3074 	}
3075 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3076 	if (m_notify == NULL)
3077 		/* no space left */
3078 		return;
3079 	SCTP_BUF_LEN(m_notify) = 0;
3080 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3081 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3082 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3083 	sai->sai_flags = 0;
3084 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3085 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3086 	sai->sai_assoc_id = sctp_get_associd(stcb);
3087 
3088 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3089 	SCTP_BUF_NEXT(m_notify) = NULL;
3090 
3091 	/* append to socket */
3092 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3093 	    0, 0, stcb->asoc.context, 0, 0, 0,
3094 	    m_notify);
3095 	if (control == NULL) {
3096 		/* no memory */
3097 		sctp_m_freem(m_notify);
3098 		return;
3099 	}
3100 	control->length = SCTP_BUF_LEN(m_notify);
3101 	control->spec_flags = M_NOTIFICATION;
3102 	/* not that we need this */
3103 	control->tail_mbuf = m_notify;
3104 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3105 	    control,
3106 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3107 }
3108 
3109 /* This always must be called with the read-queue LOCKED in the INP */
3110 static void
3111 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3112     uint32_t val, int so_locked
3113 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3114     SCTP_UNUSED
3115 #endif
3116 )
3117 {
3118 	struct mbuf *m_notify;
3119 	struct sctp_pdapi_event *pdapi;
3120 	struct sctp_queued_to_read *control;
3121 	struct sockbuf *sb;
3122 
3123 	if ((stcb == NULL) ||
3124 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3125 		/* event not enabled */
3126 		return;
3127 	}
3128 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3129 		return;
3130 	}
3131 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3132 	if (m_notify == NULL)
3133 		/* no space left */
3134 		return;
3135 	SCTP_BUF_LEN(m_notify) = 0;
3136 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3137 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3138 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3139 	pdapi->pdapi_flags = 0;
3140 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3141 	pdapi->pdapi_indication = error;
3142 	pdapi->pdapi_stream = (val >> 16);
3143 	pdapi->pdapi_seq = (val & 0x0000ffff);
3144 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3145 
3146 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3147 	SCTP_BUF_NEXT(m_notify) = NULL;
3148 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3149 	    0, 0, stcb->asoc.context, 0, 0, 0,
3150 	    m_notify);
3151 	if (control == NULL) {
3152 		/* no memory */
3153 		sctp_m_freem(m_notify);
3154 		return;
3155 	}
3156 	control->spec_flags = M_NOTIFICATION;
3157 	control->length = SCTP_BUF_LEN(m_notify);
3158 	/* not that we need this */
3159 	control->tail_mbuf = m_notify;
3160 	control->held_length = 0;
3161 	control->length = 0;
3162 	sb = &stcb->sctp_socket->so_rcv;
3163 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3164 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3165 	}
3166 	sctp_sballoc(stcb, sb, m_notify);
3167 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3168 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3169 	}
3170 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3171 	control->end_added = 1;
3172 	if (stcb->asoc.control_pdapi)
3173 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3174 	else {
3175 		/* we really should not see this case */
3176 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3177 	}
3178 	if (stcb->sctp_ep && stcb->sctp_socket) {
3179 		/* This should always be the case */
3180 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3181 		struct socket *so;
3182 
3183 		so = SCTP_INP_SO(stcb->sctp_ep);
3184 		if (!so_locked) {
3185 			atomic_add_int(&stcb->asoc.refcnt, 1);
3186 			SCTP_TCB_UNLOCK(stcb);
3187 			SCTP_SOCKET_LOCK(so, 1);
3188 			SCTP_TCB_LOCK(stcb);
3189 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3190 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3191 				SCTP_SOCKET_UNLOCK(so, 1);
3192 				return;
3193 			}
3194 		}
3195 #endif
3196 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3197 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3198 		if (!so_locked) {
3199 			SCTP_SOCKET_UNLOCK(so, 1);
3200 		}
3201 #endif
3202 	}
3203 }
3204 
/*
 * Queue an SCTP_SHUTDOWN_EVENT notification.  For one-to-one style (TCP
 * model) sockets, additionally mark the socket as unable to send so a
 * blocked writer wakes up.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Take the socket lock in the proper order: drop the tcb
		 * lock while holding a refcount, then re-check that the
		 * socket was not closed in the window.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3273 
3274 static void
3275 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3276     int so_locked
3277 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3278     SCTP_UNUSED
3279 #endif
3280 )
3281 {
3282 	struct mbuf *m_notify;
3283 	struct sctp_sender_dry_event *event;
3284 	struct sctp_queued_to_read *control;
3285 
3286 	if ((stcb == NULL) ||
3287 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3288 		/* event not enabled */
3289 		return;
3290 	}
3291 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3292 	if (m_notify == NULL) {
3293 		/* no space left */
3294 		return;
3295 	}
3296 	SCTP_BUF_LEN(m_notify) = 0;
3297 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3298 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3299 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3300 	event->sender_dry_flags = 0;
3301 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3302 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3303 
3304 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3305 	SCTP_BUF_NEXT(m_notify) = NULL;
3306 
3307 	/* append to socket */
3308 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3309 	    0, 0, stcb->asoc.context, 0, 0, 0,
3310 	    m_notify);
3311 	if (control == NULL) {
3312 		/* no memory */
3313 		sctp_m_freem(m_notify);
3314 		return;
3315 	}
3316 	control->length = SCTP_BUF_LEN(m_notify);
3317 	control->spec_flags = M_NOTIFICATION;
3318 	/* not that we need this */
3319 	control->tail_mbuf = m_notify;
3320 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3321 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3322 }
3323 
3324 
3325 void
3326 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3327 {
3328 	struct mbuf *m_notify;
3329 	struct sctp_queued_to_read *control;
3330 	struct sctp_stream_change_event *stradd;
3331 
3332 	if ((stcb == NULL) ||
3333 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3334 		/* event not enabled */
3335 		return;
3336 	}
3337 	if ((stcb->asoc.peer_req_out) && flag) {
3338 		/* Peer made the request, don't tell the local user */
3339 		stcb->asoc.peer_req_out = 0;
3340 		return;
3341 	}
3342 	stcb->asoc.peer_req_out = 0;
3343 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3344 	if (m_notify == NULL)
3345 		/* no space left */
3346 		return;
3347 	SCTP_BUF_LEN(m_notify) = 0;
3348 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3349 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3350 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3351 	stradd->strchange_flags = flag;
3352 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3353 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3354 	stradd->strchange_instrms = numberin;
3355 	stradd->strchange_outstrms = numberout;
3356 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3357 	SCTP_BUF_NEXT(m_notify) = NULL;
3358 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3359 		/* no space */
3360 		sctp_m_freem(m_notify);
3361 		return;
3362 	}
3363 	/* append to socket */
3364 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3365 	    0, 0, stcb->asoc.context, 0, 0, 0,
3366 	    m_notify);
3367 	if (control == NULL) {
3368 		/* no memory */
3369 		sctp_m_freem(m_notify);
3370 		return;
3371 	}
3372 	control->spec_flags = M_NOTIFICATION;
3373 	control->length = SCTP_BUF_LEN(m_notify);
3374 	/* not that we need this */
3375 	control->tail_mbuf = m_notify;
3376 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3377 	    control,
3378 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3379 }
3380 
3381 void
3382 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3383 {
3384 	struct mbuf *m_notify;
3385 	struct sctp_queued_to_read *control;
3386 	struct sctp_assoc_reset_event *strasoc;
3387 
3388 	if ((stcb == NULL) ||
3389 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3390 		/* event not enabled */
3391 		return;
3392 	}
3393 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3394 	if (m_notify == NULL)
3395 		/* no space left */
3396 		return;
3397 	SCTP_BUF_LEN(m_notify) = 0;
3398 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3399 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3400 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3401 	strasoc->assocreset_flags = flag;
3402 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3403 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3404 	strasoc->assocreset_local_tsn = sending_tsn;
3405 	strasoc->assocreset_remote_tsn = recv_tsn;
3406 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3407 	SCTP_BUF_NEXT(m_notify) = NULL;
3408 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3409 		/* no space */
3410 		sctp_m_freem(m_notify);
3411 		return;
3412 	}
3413 	/* append to socket */
3414 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3415 	    0, 0, stcb->asoc.context, 0, 0, 0,
3416 	    m_notify);
3417 	if (control == NULL) {
3418 		/* no memory */
3419 		sctp_m_freem(m_notify);
3420 		return;
3421 	}
3422 	control->spec_flags = M_NOTIFICATION;
3423 	control->length = SCTP_BUF_LEN(m_notify);
3424 	/* not that we need this */
3425 	control->tail_mbuf = m_notify;
3426 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3427 	    control,
3428 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3429 }
3430 
3431 
3432 
3433 static void
3434 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3435     int number_entries, uint16_t * list, int flag)
3436 {
3437 	struct mbuf *m_notify;
3438 	struct sctp_queued_to_read *control;
3439 	struct sctp_stream_reset_event *strreset;
3440 	int len;
3441 
3442 	if ((stcb == NULL) ||
3443 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3444 		/* event not enabled */
3445 		return;
3446 	}
3447 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3448 	if (m_notify == NULL)
3449 		/* no space left */
3450 		return;
3451 	SCTP_BUF_LEN(m_notify) = 0;
3452 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3453 	if (len > M_TRAILINGSPACE(m_notify)) {
3454 		/* never enough room */
3455 		sctp_m_freem(m_notify);
3456 		return;
3457 	}
3458 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3459 	memset(strreset, 0, len);
3460 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3461 	strreset->strreset_flags = flag;
3462 	strreset->strreset_length = len;
3463 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3464 	if (number_entries) {
3465 		int i;
3466 
3467 		for (i = 0; i < number_entries; i++) {
3468 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3469 		}
3470 	}
3471 	SCTP_BUF_LEN(m_notify) = len;
3472 	SCTP_BUF_NEXT(m_notify) = NULL;
3473 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3474 		/* no space */
3475 		sctp_m_freem(m_notify);
3476 		return;
3477 	}
3478 	/* append to socket */
3479 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3480 	    0, 0, stcb->asoc.context, 0, 0, 0,
3481 	    m_notify);
3482 	if (control == NULL) {
3483 		/* no memory */
3484 		sctp_m_freem(m_notify);
3485 		return;
3486 	}
3487 	control->spec_flags = M_NOTIFICATION;
3488 	control->length = SCTP_BUF_LEN(m_notify);
3489 	/* not that we need this */
3490 	control->tail_mbuf = m_notify;
3491 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3492 	    control,
3493 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3494 }
3495 
3496 
/*
 * Queue an SCTP_REMOTE_ERROR notification, optionally appending a copy of
 * the peer's ERROR chunk after the fixed-size header.  If the full-size
 * allocation fails, a header-only notification is attempted instead; in
 * that case the chunk copy is skipped (the notif_len > sizeof guard below).
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	size_t notif_len, chunk_len;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		return;
	}
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
	} else {
		chunk_len = 0;
	}
	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		notif_len = sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	memset(sre, 0, notif_len);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	/* Only copy the chunk if the full-size allocation succeeded. */
	if (notif_len > sizeof(struct sctp_remote_error)) {
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		/* not that we need this */
		control->tail_mbuf = m_notify;
		control->spec_flags = M_NOTIFICATION;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		sctp_m_freem(m_notify);
	}
}
3553 
3554 
/*
 * Central dispatcher for upper-layer (ULP) notifications: maps an internal
 * SCTP_NOTIFY_* code to the helper that builds and queues the corresponding
 * socket-API event.  Bails out early if the socket is gone/closed, cannot
 * receive, or if interface events arrive while the association is still in
 * the COOKIE_WAIT/COOKIE_ECHOED front states.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		return;
	}
	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is reported at most once per association */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	/* note: constant name misspelled ("DELVIERY") in the public header */
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* CANT_STR_ASSOC if aborted before the assoc was established */
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	/* for stream reset events, 'error' carries the entry count */
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	/* for auth events, 'data' smuggles the key id as an integer */
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __FUNCTION__, notification, notification);
		break;
	}			/* end switch */
}
3734 
/*
 * Drain every outbound queue of the association (sent queue, send queue,
 * and each stream's pending-message queue), reporting each discarded
 * message to the application via the appropriate send-failed notification
 * and freeing the chunk/queue-entry resources.  Called when the
 * association is being torn down.  Takes the TCB send lock itself unless
 * the caller indicates it already holds it (holds_lock).
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* keep the per-stream on-queue counter in sync */
			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* tell the app; the notify may steal chk->data */
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* these chunks never made it to the wire */
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				/* the notify may steal sp->data */
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3846 
3847 void
3848 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3849     struct sctp_abort_chunk *abort, int so_locked
3850 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3851     SCTP_UNUSED
3852 #endif
3853 )
3854 {
3855 	if (stcb == NULL) {
3856 		return;
3857 	}
3858 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3859 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3860 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3861 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3862 	}
3863 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3864 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3865 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3866 		return;
3867 	}
3868 	/* Tell them we lost the asoc */
3869 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3870 	if (from_peer) {
3871 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3872 	} else {
3873 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3874 	}
3875 }
3876 
/*
 * Abort in response to a received packet: send an ABORT back to the
 * peer and, when we actually have a TCB, notify the ULP and free the
 * association.  stcb may be NULL (no association found for the packet);
 * op_err is an optional error-cause chain consumed by sctp_send_abort().
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/* With no TCB we must send the ABORT with a zero verification tag. */
	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Socket lock must be taken before the TCB lock: drop and
		 * re-take the TCB lock around acquiring it, holding a
		 * refcount so the TCB cannot vanish in between.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* Only established associations count against currestab. */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3926 
3927 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Dump the association's inbound and outbound TSN tracking rings for
 * debugging (the rings are only collected when SCTP_ASOCLOG_OF_TSNS is
 * defined).  The printing itself is additionally gated on NOISY_PRINTS
 * because of its volume.  Note: the guard used to be misspelled
 * "NOSIY_PRINTS", which made this function an unconditional no-op even
 * when NOISY_PRINTS was defined.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOISY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* When wrapped, the oldest entries run from tsn_in_at to the end. */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	/* Then the newer entries from the start of the ring up to the cursor. */
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		/*
		 * No goto needed here: with both cursor and wrap flag zero
		 * the loops below are skipped anyway.
		 */
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3988 
3989 #endif
3990 
/*
 * Abort an association from our side: notify the ULP (unless the
 * socket is already gone), send an ABORT chunk to the peer, adjust the
 * statistics, and free the TCB.  op_err is consumed by
 * sctp_send_abort_tcb().  If stcb is NULL and the socket is being
 * closed with no associations left, the endpoint itself is freed.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* Only established associations count against currestab. */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Socket lock must be taken before the TCB lock: drop and re-take
	 * the TCB lock around acquiring it, holding a refcount so the TCB
	 * cannot vanish in between.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4052 
/*
 * Handle an out-of-the-blue packet (one that matches no existing
 * association).  Walk its chunks: ABORT, SHUTDOWN COMPLETE and
 * PACKET DROPPED must not be answered at all; a SHUTDOWN ACK is
 * answered with a SHUTDOWN COMPLETE; anything else leads to an ABORT,
 * subject to the sctp_blackhole sysctl (1 suppresses the ABORT for
 * packets containing an INIT, other non-zero values suppress it
 * always).  cause is an optional error-cause chain handed to
 * sctp_send_abort().
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/*
	 * If the endpoint is being closed and has no associations left,
	 * complete the close now.
	 */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    mflowtype, mflowid, fibnum,
			    vrf_id, port);
			return;
		default:
			break;
		}
		/* Chunks are padded to 32-bit boundaries. */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
		    mflowtype, mflowid, fibnum,
		    vrf_id, port);
	}
}
4118 
4119 /*
4120  * check the inbound datagram to make sure there is not an abort inside it,
4121  * if there is return 1, else return 0.
4122  */
4123 int
4124 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4125 {
4126 	struct sctp_chunkhdr *ch;
4127 	struct sctp_init_chunk *init_chk, chunk_buf;
4128 	int offset;
4129 	unsigned int chk_length;
4130 
4131 	offset = iphlen + sizeof(struct sctphdr);
4132 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4133 	    (uint8_t *) & chunk_buf);
4134 	while (ch != NULL) {
4135 		chk_length = ntohs(ch->chunk_length);
4136 		if (chk_length < sizeof(*ch)) {
4137 			/* packet is probably corrupt */
4138 			break;
4139 		}
4140 		/* we seem to be ok, is it an abort? */
4141 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4142 			/* yep, tell them */
4143 			return (1);
4144 		}
4145 		if (ch->chunk_type == SCTP_INITIATION) {
4146 			/* need to update the Vtag */
4147 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4148 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4149 			if (init_chk != NULL) {
4150 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4151 			}
4152 		}
4153 		/* Nope, move to the next chunk */
4154 		offset += SCTP_SIZE32(chk_length);
4155 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4156 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4157 	}
4158 	return (0);
4159 }
4160 
4161 /*
4162  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4163  * set (i.e. it's 0) so, create this function to compare link local scopes
4164  */
4165 #ifdef INET6
/*
 * Compare the link-local scopes of two IPv6 addresses.  Returns 1 when
 * both scopes can be determined and match, 0 otherwise.
 */
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
	struct sockaddr_in6 a, b;

	/* Work on copies so the callers' addresses are never modified. */
	a = *addr1;
	b = *addr2;

	if ((a.sin6_scope_id == 0) && sa6_recoverscope(&a)) {
		/* can't get scope, so can't match */
		return (0);
	}
	if ((b.sin6_scope_id == 0) && sa6_recoverscope(&b)) {
		/* can't get scope, so can't match */
		return (0);
	}
	return ((a.sin6_scope_id == b.sin6_scope_id) ? 1 : 0);
}
4190 
4191 /*
4192  * returns a sockaddr_in6 with embedded scope recovered and removed
4193  */
/*
 * returns a sockaddr_in6 with embedded scope recovered and removed
 */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* Only link-local IPv6 addresses carry embedded scope junk. */
	if ((addr->sin6_family != AF_INET6) ||
	    (!IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr))) {
		return (addr);
	}
	if (addr->sin6_scope_id == 0) {
		/* Try to pull the scope id out of the embedded form. */
		*store = *addr;
		if (!sa6_recoverscope(store)) {
			/* recovered it; hand back the fixed-up copy */
			return (store);
		}
	} else {
		/* scope id already set; just strip the embedded copy */
		in6_clearscope(&addr->sin6_addr);
	}
	return (addr);
}
4214 
4215 #endif
4216 
4217 /*
4218  * are the two addresses the same?  currently a "scopeless" check returns: 1
4219  * if same, 0 if not
4220  */
4221 int
4222 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4223 {
4224 
4225 	/* must be valid */
4226 	if (sa1 == NULL || sa2 == NULL)
4227 		return (0);
4228 
4229 	/* must be the same family */
4230 	if (sa1->sa_family != sa2->sa_family)
4231 		return (0);
4232 
4233 	switch (sa1->sa_family) {
4234 #ifdef INET6
4235 	case AF_INET6:
4236 		{
4237 			/* IPv6 addresses */
4238 			struct sockaddr_in6 *sin6_1, *sin6_2;
4239 
4240 			sin6_1 = (struct sockaddr_in6 *)sa1;
4241 			sin6_2 = (struct sockaddr_in6 *)sa2;
4242 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4243 			    sin6_2));
4244 		}
4245 #endif
4246 #ifdef INET
4247 	case AF_INET:
4248 		{
4249 			/* IPv4 addresses */
4250 			struct sockaddr_in *sin_1, *sin_2;
4251 
4252 			sin_1 = (struct sockaddr_in *)sa1;
4253 			sin_2 = (struct sockaddr_in *)sa2;
4254 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4255 		}
4256 #endif
4257 	default:
4258 		/* we don't do these... */
4259 		return (0);
4260 	}
4261 }
4262 
/*
 * Print a human-readable form of a socket address (IPv4 or IPv6) via
 * SCTP_PRINTF; unknown families print "?".
 */
void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];

#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;

			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
			break;
		}
#endif
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin = (struct sockaddr_in *)sa;
			unsigned char *bytes = (unsigned char *)&sin->sin_addr;

			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
			    bytes[0], bytes[1], bytes[2], bytes[3],
			    ntohs(sin->sin_port));
			break;
		}
#endif
	default:
		SCTP_PRINTF("?\n");
		break;
	}
}
4303 
/*
 * Migrate all queued receive controls that belong to stcb from the old
 * endpoint's read queue to the new endpoint's (used on peeloff/accept).
 * The socket-buffer byte accounting is moved along with them: bytes are
 * uncharged from old_so's receive buffer and charged to new_so's.
 * waitflags is passed to sblock(); if the sblock cannot be obtained the
 * data is deliberately left where it is.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for our target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* Uncharge every mbuf from the old receive buffer. */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* Charge every mbuf to the new receive buffer. */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4379 
/*
 * Queue a fully-built control (read) structure at the tail of the
 * endpoint's read queue and charge its mbuf chain to the socket receive
 * buffer, so select()/poll() and the read-wakeup behave correctly.
 * Ownership of control and its mbuf chain passes to the read queue; on
 * the can't-read path they are freed instead.  'end' marks the message
 * complete; inp_read_lock_held says whether the caller already holds
 * the INP read lock; so_locked plays the same role for the socket lock
 * on platforms that take it here.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Reader is gone; discard the control and its data. */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* Notifications are not counted as received user messages. */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* Strip zero-length mbufs and charge the rest to the sockbuf. */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* Finally wake up anybody sleeping on the socket. */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Socket lock before TCB lock: drop and
				 * re-take the TCB lock around it, holding a
				 * refcount so the TCB cannot vanish.
				 */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4505 
4506 
/*
 * Append mbuf chain m to a control that is already on (or headed for)
 * the read queue -- used while a partial delivery API event is under
 * way or when appending on the reassembly queue.  Zero-length mbufs
 * are stripped, the added bytes are charged to sb when it is non-NULL,
 * control->length is increased, and 'end' marks the message complete.
 * ctls_cumack records the pd-api highest TSN in the control.
 * Returns 0 on success, -1 when control is NULL/already complete or m
 * is NULL (m may be freed in that case on the INVARIANTS-less path).
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* Reader gone: silently accept and drop the append. */
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* Strip zero-length mbufs and charge the rest to the sockbuf. */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	/* Splice the (trimmed) chain onto the control. */
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* Finally wake up anybody sleeping on the socket. */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * Socket lock before TCB lock: drop and re-take the
			 * TCB lock around it, holding a refcount so the TCB
			 * cannot vanish.
			 */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4656 
4657 
4658 
4659 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4660  *************ALTERNATE ROUTING CODE
4661  */
4662 
4663 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4664  *************ALTERNATE ROUTING CODE
4665  */
4666 
4667 struct mbuf *
4668 sctp_generate_cause(uint16_t code, char *info)
4669 {
4670 	struct mbuf *m;
4671 	struct sctp_gen_error_cause *cause;
4672 	size_t info_len, len;
4673 
4674 	if ((code == 0) || (info == NULL)) {
4675 		return (NULL);
4676 	}
4677 	info_len = strlen(info);
4678 	len = sizeof(struct sctp_paramhdr) + info_len;
4679 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4680 	if (m != NULL) {
4681 		SCTP_BUF_LEN(m) = len;
4682 		cause = mtod(m, struct sctp_gen_error_cause *);
4683 		cause->code = htons(code);
4684 		cause->length = htons((uint16_t) len);
4685 		memcpy(cause->info, info, info_len);
4686 	}
4687 	return (m);
4688 }
4689 
4690 struct mbuf *
4691 sctp_generate_no_user_data_cause(uint32_t tsn)
4692 {
4693 	struct mbuf *m;
4694 	struct sctp_error_no_user_data *no_user_data_cause;
4695 	size_t len;
4696 
4697 	len = sizeof(struct sctp_error_no_user_data);
4698 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4699 	if (m != NULL) {
4700 		SCTP_BUF_LEN(m) = len;
4701 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4702 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4703 		no_user_data_cause->cause.length = htons((uint16_t) len);
4704 		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4705 	}
4706 	return (m);
4707 }
4708 
4709 #ifdef SCTP_MBCNT_LOGGING
/*
 * Release the buffer-space accounting held by chunk tp1 (MBCNT-logging
 * variant): decrement the association's chunk and byte counters and
 * mirror the change in the socket send buffer for 1-to-1 style
 * sockets.  chk_cnt is the number of chunks being released.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	/* A chunk without data holds no accounted buffer space. */
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* Clamp at zero so an accounting mismatch cannot underflow. */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* Same clamp for the socket send buffer on 1-to-1 style sockets. */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4741 
4742 #endif
4743 
4744 int
4745 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4746     uint8_t sent, int so_locked
4747 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4748     SCTP_UNUSED
4749 #endif
4750 )
4751 {
4752 	struct sctp_stream_out *strq;
4753 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4754 	struct sctp_stream_queue_pending *sp;
4755 	uint16_t stream = 0, seq = 0;
4756 	uint8_t foundeom = 0;
4757 	int ret_sz = 0;
4758 	int notdone;
4759 	int do_wakeup_routine = 0;
4760 
4761 	stream = tp1->rec.data.stream_number;
4762 	seq = tp1->rec.data.stream_seq;
4763 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4764 		stcb->asoc.abandoned_sent[0]++;
4765 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4766 		stcb->asoc.strmout[stream].abandoned_sent[0]++;
4767 #if defined(SCTP_DETAILED_STR_STATS)
4768 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4769 #endif
4770 	} else {
4771 		stcb->asoc.abandoned_unsent[0]++;
4772 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4773 		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
4774 #if defined(SCTP_DETAILED_STR_STATS)
4775 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4776 #endif
4777 	}
4778 	do {
4779 		ret_sz += tp1->book_size;
4780 		if (tp1->data != NULL) {
4781 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4782 				sctp_flight_size_decrease(tp1);
4783 				sctp_total_flight_decrease(stcb, tp1);
4784 			}
4785 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4786 			stcb->asoc.peers_rwnd += tp1->send_size;
4787 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4788 			if (sent) {
4789 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4790 			} else {
4791 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4792 			}
4793 			if (tp1->data) {
4794 				sctp_m_freem(tp1->data);
4795 				tp1->data = NULL;
4796 			}
4797 			do_wakeup_routine = 1;
4798 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4799 				stcb->asoc.sent_queue_cnt_removeable--;
4800 			}
4801 		}
4802 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4803 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4804 		    SCTP_DATA_NOT_FRAG) {
4805 			/* not frag'ed we ae done   */
4806 			notdone = 0;
4807 			foundeom = 1;
4808 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4809 			/* end of frag, we are done */
4810 			notdone = 0;
4811 			foundeom = 1;
4812 		} else {
4813 			/*
4814 			 * Its a begin or middle piece, we must mark all of
4815 			 * it
4816 			 */
4817 			notdone = 1;
4818 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4819 		}
4820 	} while (tp1 && notdone);
4821 	if (foundeom == 0) {
4822 		/*
4823 		 * The multi-part message was scattered across the send and
4824 		 * sent queue.
4825 		 */
4826 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4827 			if ((tp1->rec.data.stream_number != stream) ||
4828 			    (tp1->rec.data.stream_seq != seq)) {
4829 				break;
4830 			}
4831 			/*
4832 			 * save to chk in case we have some on stream out
4833 			 * queue. If so and we have an un-transmitted one we
4834 			 * don't have to fudge the TSN.
4835 			 */
4836 			chk = tp1;
4837 			ret_sz += tp1->book_size;
4838 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4839 			if (sent) {
4840 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4841 			} else {
4842 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4843 			}
4844 			if (tp1->data) {
4845 				sctp_m_freem(tp1->data);
4846 				tp1->data = NULL;
4847 			}
4848 			/* No flight involved here book the size to 0 */
4849 			tp1->book_size = 0;
4850 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4851 				foundeom = 1;
4852 			}
4853 			do_wakeup_routine = 1;
4854 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4855 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4856 			/*
4857 			 * on to the sent queue so we can wait for it to be
4858 			 * passed by.
4859 			 */
4860 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4861 			    sctp_next);
4862 			stcb->asoc.send_queue_cnt--;
4863 			stcb->asoc.sent_queue_cnt++;
4864 		}
4865 	}
4866 	if (foundeom == 0) {
4867 		/*
4868 		 * Still no eom found. That means there is stuff left on the
4869 		 * stream out queue.. yuck.
4870 		 */
4871 		SCTP_TCB_SEND_LOCK(stcb);
4872 		strq = &stcb->asoc.strmout[stream];
4873 		sp = TAILQ_FIRST(&strq->outqueue);
4874 		if (sp != NULL) {
4875 			sp->discard_rest = 1;
4876 			/*
4877 			 * We may need to put a chunk on the queue that
4878 			 * holds the TSN that would have been sent with the
4879 			 * LAST bit.
4880 			 */
4881 			if (chk == NULL) {
4882 				/* Yep, we have to */
4883 				sctp_alloc_a_chunk(stcb, chk);
4884 				if (chk == NULL) {
4885 					/*
4886 					 * we are hosed. All we can do is
4887 					 * nothing.. which will cause an
4888 					 * abort if the peer is paying
4889 					 * attention.
4890 					 */
4891 					goto oh_well;
4892 				}
4893 				memset(chk, 0, sizeof(*chk));
4894 				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4895 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4896 				chk->asoc = &stcb->asoc;
4897 				chk->rec.data.stream_seq = strq->next_sequence_send;
4898 				chk->rec.data.stream_number = sp->stream;
4899 				chk->rec.data.payloadtype = sp->ppid;
4900 				chk->rec.data.context = sp->context;
4901 				chk->flags = sp->act_flags;
4902 				chk->whoTo = NULL;
4903 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4904 				strq->chunks_on_queues++;
4905 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4906 				stcb->asoc.sent_queue_cnt++;
4907 				stcb->asoc.pr_sctp_cnt++;
4908 			} else {
4909 				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4910 			}
4911 			strq->next_sequence_send++;
4912 	oh_well:
4913 			if (sp->data) {
4914 				/*
4915 				 * Pull any data to free up the SB and allow
4916 				 * sender to "add more" while we will throw
4917 				 * away :-)
4918 				 */
4919 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4920 				ret_sz += sp->length;
4921 				do_wakeup_routine = 1;
4922 				sp->some_taken = 1;
4923 				sctp_m_freem(sp->data);
4924 				sp->data = NULL;
4925 				sp->tail_mbuf = NULL;
4926 				sp->length = 0;
4927 			}
4928 		}
4929 		SCTP_TCB_SEND_UNLOCK(stcb);
4930 	}
4931 	if (do_wakeup_routine) {
4932 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4933 		struct socket *so;
4934 
4935 		so = SCTP_INP_SO(stcb->sctp_ep);
4936 		if (!so_locked) {
4937 			atomic_add_int(&stcb->asoc.refcnt, 1);
4938 			SCTP_TCB_UNLOCK(stcb);
4939 			SCTP_SOCKET_LOCK(so, 1);
4940 			SCTP_TCB_LOCK(stcb);
4941 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4942 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4943 				/* assoc was freed while we were unlocked */
4944 				SCTP_SOCKET_UNLOCK(so, 1);
4945 				return (ret_sz);
4946 			}
4947 		}
4948 #endif
4949 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4950 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4951 		if (!so_locked) {
4952 			SCTP_SOCKET_UNLOCK(so, 1);
4953 		}
4954 #endif
4955 	}
4956 	return (ret_sz);
4957 }
4958 
4959 /*
4960  * checks to see if the given address, sa, is one that is currently known by
4961  * the kernel note: can't distinguish the same address on multiple interfaces
4962  * and doesn't handle multiple addresses with different zone/scope id's note:
4963  * ifa_ifwithaddr() compares the entire sockaddr struct
4964  */
4965 struct sctp_ifa *
4966 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4967     int holds_lock)
4968 {
4969 	struct sctp_laddr *laddr;
4970 
4971 	if (holds_lock == 0) {
4972 		SCTP_INP_RLOCK(inp);
4973 	}
4974 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4975 		if (laddr->ifa == NULL)
4976 			continue;
4977 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4978 			continue;
4979 #ifdef INET
4980 		if (addr->sa_family == AF_INET) {
4981 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4982 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4983 				/* found him. */
4984 				if (holds_lock == 0) {
4985 					SCTP_INP_RUNLOCK(inp);
4986 				}
4987 				return (laddr->ifa);
4988 				break;
4989 			}
4990 		}
4991 #endif
4992 #ifdef INET6
4993 		if (addr->sa_family == AF_INET6) {
4994 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4995 			    &laddr->ifa->address.sin6)) {
4996 				/* found him. */
4997 				if (holds_lock == 0) {
4998 					SCTP_INP_RUNLOCK(inp);
4999 				}
5000 				return (laddr->ifa);
5001 				break;
5002 			}
5003 		}
5004 #endif
5005 	}
5006 	if (holds_lock == 0) {
5007 		SCTP_INP_RUNLOCK(inp);
5008 	}
5009 	return (NULL);
5010 }
5011 
/*
 * Compute a hash value over the address portion of a sockaddr.
 * Unsupported address families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			uint32_t v4;

			/* Fold the upper half of the v4 address into the lower. */
			v4 = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
			return (v4 ^ (v4 >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *a6;
			uint32_t sum;

			/* Sum the four 32-bit words of the v6 address, then fold. */
			a6 = (struct sockaddr_in6 *)addr;
			sum = a6->sin6_addr.s6_addr32[0];
			sum += a6->sin6_addr.s6_addr32[1];
			sum += a6->sin6_addr.s6_addr32[2];
			sum += a6->sin6_addr.s6_addr32[3];
			return (sum ^ (sum >> 16));
		}
#endif
	default:
		break;
	}
	return (0);
}
5045 
5046 struct sctp_ifa *
5047 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5048 {
5049 	struct sctp_ifa *sctp_ifap;
5050 	struct sctp_vrf *vrf;
5051 	struct sctp_ifalist *hash_head;
5052 	uint32_t hash_of_addr;
5053 
5054 	if (holds_lock == 0)
5055 		SCTP_IPI_ADDR_RLOCK();
5056 
5057 	vrf = sctp_find_vrf(vrf_id);
5058 	if (vrf == NULL) {
5059 		if (holds_lock == 0)
5060 			SCTP_IPI_ADDR_RUNLOCK();
5061 		return (NULL);
5062 	}
5063 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5064 
5065 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5066 	if (hash_head == NULL) {
5067 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5068 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5069 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5070 		sctp_print_address(addr);
5071 		SCTP_PRINTF("No such bucket for address\n");
5072 		if (holds_lock == 0)
5073 			SCTP_IPI_ADDR_RUNLOCK();
5074 
5075 		return (NULL);
5076 	}
5077 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5078 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5079 			continue;
5080 #ifdef INET
5081 		if (addr->sa_family == AF_INET) {
5082 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5083 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5084 				/* found him. */
5085 				if (holds_lock == 0)
5086 					SCTP_IPI_ADDR_RUNLOCK();
5087 				return (sctp_ifap);
5088 				break;
5089 			}
5090 		}
5091 #endif
5092 #ifdef INET6
5093 		if (addr->sa_family == AF_INET6) {
5094 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5095 			    &sctp_ifap->address.sin6)) {
5096 				/* found him. */
5097 				if (holds_lock == 0)
5098 					SCTP_IPI_ADDR_RUNLOCK();
5099 				return (sctp_ifap);
5100 				break;
5101 			}
5102 		}
5103 #endif
5104 	}
5105 	if (holds_lock == 0)
5106 		SCTP_IPI_ADDR_RUNLOCK();
5107 	return (NULL);
5108 }
5109 
/*
 * The user has read data off the socket; decide whether enough receive
 * window has opened up to be worth advertising to the peer, and if so
 * send a window-update SACK and give the output path a kick.
 * *freed_so_far is the byte count consumed since the last update; it is
 * folded into the tcb's running total and reset to zero here.
 * hold_rlock indicates the caller holds the inp read-queue lock, which
 * is dropped around the SACK/output work and re-taken before return.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the association so it cannot be freed while we work on it. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed byte count into the per-tcb total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window grew by at least rwnd_req: report it to the peer. */
		if (hold_rlock) {
			/* Drop the read lock across the SACK/output calls. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the tcb lock: free may have raced us. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-take the read lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Drop the reference taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5192 
5193 int
5194 sctp_sorecvmsg(struct socket *so,
5195     struct uio *uio,
5196     struct mbuf **mp,
5197     struct sockaddr *from,
5198     int fromlen,
5199     int *msg_flags,
5200     struct sctp_sndrcvinfo *sinfo,
5201     int filling_sinfo)
5202 {
5203 	/*
5204 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5205 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5206 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5207 	 * On the way out we may send out any combination of:
5208 	 * MSG_NOTIFICATION MSG_EOR
5209 	 *
5210 	 */
5211 	struct sctp_inpcb *inp = NULL;
5212 	int my_len = 0;
5213 	int cp_len = 0, error = 0;
5214 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5215 	struct mbuf *m = NULL;
5216 	struct sctp_tcb *stcb = NULL;
5217 	int wakeup_read_socket = 0;
5218 	int freecnt_applied = 0;
5219 	int out_flags = 0, in_flags = 0;
5220 	int block_allowed = 1;
5221 	uint32_t freed_so_far = 0;
5222 	uint32_t copied_so_far = 0;
5223 	int in_eeor_mode = 0;
5224 	int no_rcv_needed = 0;
5225 	uint32_t rwnd_req = 0;
5226 	int hold_sblock = 0;
5227 	int hold_rlock = 0;
5228 	int slen = 0;
5229 	uint32_t held_length = 0;
5230 	int sockbuf_lock = 0;
5231 
5232 	if (uio == NULL) {
5233 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5234 		return (EINVAL);
5235 	}
5236 	if (msg_flags) {
5237 		in_flags = *msg_flags;
5238 		if (in_flags & MSG_PEEK)
5239 			SCTP_STAT_INCR(sctps_read_peeks);
5240 	} else {
5241 		in_flags = 0;
5242 	}
5243 	slen = uio->uio_resid;
5244 
5245 	/* Pull in and set up our int flags */
5246 	if (in_flags & MSG_OOB) {
5247 		/* Out of band's NOT supported */
5248 		return (EOPNOTSUPP);
5249 	}
5250 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5251 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5252 		return (EINVAL);
5253 	}
5254 	if ((in_flags & (MSG_DONTWAIT
5255 	    | MSG_NBIO
5256 	    )) ||
5257 	    SCTP_SO_IS_NBIO(so)) {
5258 		block_allowed = 0;
5259 	}
5260 	/* setup the endpoint */
5261 	inp = (struct sctp_inpcb *)so->so_pcb;
5262 	if (inp == NULL) {
5263 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5264 		return (EFAULT);
5265 	}
5266 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5267 	/* Must be at least a MTU's worth */
5268 	if (rwnd_req < SCTP_MIN_RWND)
5269 		rwnd_req = SCTP_MIN_RWND;
5270 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5271 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5272 		sctp_misc_ints(SCTP_SORECV_ENTER,
5273 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5274 	}
5275 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5276 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5277 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5278 	}
5279 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5280 	if (error) {
5281 		goto release_unlocked;
5282 	}
5283 	sockbuf_lock = 1;
5284 restart:
5285 
5286 
5287 restart_nosblocks:
5288 	if (hold_sblock == 0) {
5289 		SOCKBUF_LOCK(&so->so_rcv);
5290 		hold_sblock = 1;
5291 	}
5292 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5293 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5294 		goto out;
5295 	}
5296 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5297 		if (so->so_error) {
5298 			error = so->so_error;
5299 			if ((in_flags & MSG_PEEK) == 0)
5300 				so->so_error = 0;
5301 			goto out;
5302 		} else {
5303 			if (so->so_rcv.sb_cc == 0) {
5304 				/* indicate EOF */
5305 				error = 0;
5306 				goto out;
5307 			}
5308 		}
5309 	}
5310 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5311 		/* we need to wait for data */
5312 		if ((so->so_rcv.sb_cc == 0) &&
5313 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5314 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5315 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5316 				/*
5317 				 * For active open side clear flags for
5318 				 * re-use passive open is blocked by
5319 				 * connect.
5320 				 */
5321 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5322 					/*
5323 					 * You were aborted, passive side
5324 					 * always hits here
5325 					 */
5326 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5327 					error = ECONNRESET;
5328 				}
5329 				so->so_state &= ~(SS_ISCONNECTING |
5330 				    SS_ISDISCONNECTING |
5331 				    SS_ISCONFIRMING |
5332 				    SS_ISCONNECTED);
5333 				if (error == 0) {
5334 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5335 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5336 						error = ENOTCONN;
5337 					}
5338 				}
5339 				goto out;
5340 			}
5341 		}
5342 		error = sbwait(&so->so_rcv);
5343 		if (error) {
5344 			goto out;
5345 		}
5346 		held_length = 0;
5347 		goto restart_nosblocks;
5348 	} else if (so->so_rcv.sb_cc == 0) {
5349 		if (so->so_error) {
5350 			error = so->so_error;
5351 			if ((in_flags & MSG_PEEK) == 0)
5352 				so->so_error = 0;
5353 		} else {
5354 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5355 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5356 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5357 					/*
5358 					 * For active open side clear flags
5359 					 * for re-use passive open is
5360 					 * blocked by connect.
5361 					 */
5362 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5363 						/*
5364 						 * You were aborted, passive
5365 						 * side always hits here
5366 						 */
5367 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5368 						error = ECONNRESET;
5369 					}
5370 					so->so_state &= ~(SS_ISCONNECTING |
5371 					    SS_ISDISCONNECTING |
5372 					    SS_ISCONFIRMING |
5373 					    SS_ISCONNECTED);
5374 					if (error == 0) {
5375 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5376 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5377 							error = ENOTCONN;
5378 						}
5379 					}
5380 					goto out;
5381 				}
5382 			}
5383 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5384 			error = EWOULDBLOCK;
5385 		}
5386 		goto out;
5387 	}
5388 	if (hold_sblock == 1) {
5389 		SOCKBUF_UNLOCK(&so->so_rcv);
5390 		hold_sblock = 0;
5391 	}
5392 	/* we possibly have data we can read */
5393 	/* sa_ignore FREED_MEMORY */
5394 	control = TAILQ_FIRST(&inp->read_queue);
5395 	if (control == NULL) {
5396 		/*
5397 		 * This could be happening since the appender did the
5398 		 * increment but as not yet did the tailq insert onto the
5399 		 * read_queue
5400 		 */
5401 		if (hold_rlock == 0) {
5402 			SCTP_INP_READ_LOCK(inp);
5403 		}
5404 		control = TAILQ_FIRST(&inp->read_queue);
5405 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5406 #ifdef INVARIANTS
5407 			panic("Huh, its non zero and nothing on control?");
5408 #endif
5409 			so->so_rcv.sb_cc = 0;
5410 		}
5411 		SCTP_INP_READ_UNLOCK(inp);
5412 		hold_rlock = 0;
5413 		goto restart;
5414 	}
5415 	if ((control->length == 0) &&
5416 	    (control->do_not_ref_stcb)) {
5417 		/*
5418 		 * Clean up code for freeing assoc that left behind a
5419 		 * pdapi.. maybe a peer in EEOR that just closed after
5420 		 * sending and never indicated a EOR.
5421 		 */
5422 		if (hold_rlock == 0) {
5423 			hold_rlock = 1;
5424 			SCTP_INP_READ_LOCK(inp);
5425 		}
5426 		control->held_length = 0;
5427 		if (control->data) {
5428 			/* Hmm there is data here .. fix */
5429 			struct mbuf *m_tmp;
5430 			int cnt = 0;
5431 
5432 			m_tmp = control->data;
5433 			while (m_tmp) {
5434 				cnt += SCTP_BUF_LEN(m_tmp);
5435 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5436 					control->tail_mbuf = m_tmp;
5437 					control->end_added = 1;
5438 				}
5439 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5440 			}
5441 			control->length = cnt;
5442 		} else {
5443 			/* remove it */
5444 			TAILQ_REMOVE(&inp->read_queue, control, next);
5445 			/* Add back any hiddend data */
5446 			sctp_free_remote_addr(control->whoFrom);
5447 			sctp_free_a_readq(stcb, control);
5448 		}
5449 		if (hold_rlock) {
5450 			hold_rlock = 0;
5451 			SCTP_INP_READ_UNLOCK(inp);
5452 		}
5453 		goto restart;
5454 	}
5455 	if ((control->length == 0) &&
5456 	    (control->end_added == 1)) {
5457 		/*
5458 		 * Do we also need to check for (control->pdapi_aborted ==
5459 		 * 1)?
5460 		 */
5461 		if (hold_rlock == 0) {
5462 			hold_rlock = 1;
5463 			SCTP_INP_READ_LOCK(inp);
5464 		}
5465 		TAILQ_REMOVE(&inp->read_queue, control, next);
5466 		if (control->data) {
5467 #ifdef INVARIANTS
5468 			panic("control->data not null but control->length == 0");
5469 #else
5470 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5471 			sctp_m_freem(control->data);
5472 			control->data = NULL;
5473 #endif
5474 		}
5475 		if (control->aux_data) {
5476 			sctp_m_free(control->aux_data);
5477 			control->aux_data = NULL;
5478 		}
5479 		sctp_free_remote_addr(control->whoFrom);
5480 		sctp_free_a_readq(stcb, control);
5481 		if (hold_rlock) {
5482 			hold_rlock = 0;
5483 			SCTP_INP_READ_UNLOCK(inp);
5484 		}
5485 		goto restart;
5486 	}
5487 	if (control->length == 0) {
5488 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5489 		    (filling_sinfo)) {
5490 			/* find a more suitable one then this */
5491 			ctl = TAILQ_NEXT(control, next);
5492 			while (ctl) {
5493 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5494 				    (ctl->some_taken ||
5495 				    (ctl->spec_flags & M_NOTIFICATION) ||
5496 				    ((ctl->do_not_ref_stcb == 0) &&
5497 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5498 				    ) {
5499 					/*-
5500 					 * If we have a different TCB next, and there is data
5501 					 * present. If we have already taken some (pdapi), OR we can
5502 					 * ref the tcb and no delivery as started on this stream, we
5503 					 * take it. Note we allow a notification on a different
5504 					 * assoc to be delivered..
5505 					 */
5506 					control = ctl;
5507 					goto found_one;
5508 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5509 					    (ctl->length) &&
5510 					    ((ctl->some_taken) ||
5511 					    ((ctl->do_not_ref_stcb == 0) &&
5512 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5513 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5514 					/*-
5515 					 * If we have the same tcb, and there is data present, and we
5516 					 * have the strm interleave feature present. Then if we have
5517 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5518 					 * not started a delivery for this stream, we can take it.
5519 					 * Note we do NOT allow a notificaiton on the same assoc to
5520 					 * be delivered.
5521 					 */
5522 					control = ctl;
5523 					goto found_one;
5524 				}
5525 				ctl = TAILQ_NEXT(ctl, next);
5526 			}
5527 		}
5528 		/*
5529 		 * if we reach here, not suitable replacement is available
5530 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5531 		 * into the our held count, and its time to sleep again.
5532 		 */
5533 		held_length = so->so_rcv.sb_cc;
5534 		control->held_length = so->so_rcv.sb_cc;
5535 		goto restart;
5536 	}
5537 	/* Clear the held length since there is something to read */
5538 	control->held_length = 0;
5539 	if (hold_rlock) {
5540 		SCTP_INP_READ_UNLOCK(inp);
5541 		hold_rlock = 0;
5542 	}
5543 found_one:
5544 	/*
5545 	 * If we reach here, control has a some data for us to read off.
5546 	 * Note that stcb COULD be NULL.
5547 	 */
5548 	control->some_taken++;
5549 	if (hold_sblock) {
5550 		SOCKBUF_UNLOCK(&so->so_rcv);
5551 		hold_sblock = 0;
5552 	}
5553 	stcb = control->stcb;
5554 	if (stcb) {
5555 		if ((control->do_not_ref_stcb == 0) &&
5556 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5557 			if (freecnt_applied == 0)
5558 				stcb = NULL;
5559 		} else if (control->do_not_ref_stcb == 0) {
5560 			/* you can't free it on me please */
5561 			/*
5562 			 * The lock on the socket buffer protects us so the
5563 			 * free code will stop. But since we used the
5564 			 * socketbuf lock and the sender uses the tcb_lock
5565 			 * to increment, we need to use the atomic add to
5566 			 * the refcnt
5567 			 */
5568 			if (freecnt_applied) {
5569 #ifdef INVARIANTS
5570 				panic("refcnt already incremented");
5571 #else
5572 				SCTP_PRINTF("refcnt already incremented?\n");
5573 #endif
5574 			} else {
5575 				atomic_add_int(&stcb->asoc.refcnt, 1);
5576 				freecnt_applied = 1;
5577 			}
5578 			/*
5579 			 * Setup to remember how much we have not yet told
5580 			 * the peer our rwnd has opened up. Note we grab the
5581 			 * value from the tcb from last time. Note too that
5582 			 * sack sending clears this when a sack is sent,
5583 			 * which is fine. Once we hit the rwnd_req, we then
5584 			 * will go to the sctp_user_rcvd() that will not
5585 			 * lock until it KNOWs it MUST send a WUP-SACK.
5586 			 */
5587 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5588 			stcb->freed_by_sorcv_sincelast = 0;
5589 		}
5590 	}
5591 	if (stcb &&
5592 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5593 	    control->do_not_ref_stcb == 0) {
5594 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5595 	}
5596 	/* First lets get off the sinfo and sockaddr info */
5597 	if ((sinfo) && filling_sinfo) {
5598 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5599 		nxt = TAILQ_NEXT(control, next);
5600 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5601 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5602 			struct sctp_extrcvinfo *s_extra;
5603 
5604 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5605 			if ((nxt) &&
5606 			    (nxt->length)) {
5607 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5608 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5609 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5610 				}
5611 				if (nxt->spec_flags & M_NOTIFICATION) {
5612 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5613 				}
5614 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5615 				s_extra->sreinfo_next_length = nxt->length;
5616 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5617 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5618 				if (nxt->tail_mbuf != NULL) {
5619 					if (nxt->end_added) {
5620 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5621 					}
5622 				}
5623 			} else {
5624 				/*
5625 				 * we explicitly 0 this, since the memcpy
5626 				 * got some other things beyond the older
5627 				 * sinfo_ that is on the control's structure
5628 				 * :-D
5629 				 */
5630 				nxt = NULL;
5631 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5632 				s_extra->sreinfo_next_aid = 0;
5633 				s_extra->sreinfo_next_length = 0;
5634 				s_extra->sreinfo_next_ppid = 0;
5635 				s_extra->sreinfo_next_stream = 0;
5636 			}
5637 		}
5638 		/*
5639 		 * update off the real current cum-ack, if we have an stcb.
5640 		 */
5641 		if ((control->do_not_ref_stcb == 0) && stcb)
5642 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5643 		/*
5644 		 * mask off the high bits, we keep the actual chunk bits in
5645 		 * there.
5646 		 */
5647 		sinfo->sinfo_flags &= 0x00ff;
5648 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5649 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5650 		}
5651 	}
5652 #ifdef SCTP_ASOCLOG_OF_TSNS
5653 	{
5654 		int index, newindex;
5655 		struct sctp_pcbtsn_rlog *entry;
5656 
5657 		do {
5658 			index = inp->readlog_index;
5659 			newindex = index + 1;
5660 			if (newindex >= SCTP_READ_LOG_SIZE) {
5661 				newindex = 0;
5662 			}
5663 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5664 		entry = &inp->readlog[index];
5665 		entry->vtag = control->sinfo_assoc_id;
5666 		entry->strm = control->sinfo_stream;
5667 		entry->seq = control->sinfo_ssn;
5668 		entry->sz = control->length;
5669 		entry->flgs = control->sinfo_flags;
5670 	}
5671 #endif
5672 	if ((fromlen > 0) && (from != NULL)) {
5673 		union sctp_sockstore store;
5674 		size_t len;
5675 
5676 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5677 #ifdef INET6
5678 		case AF_INET6:
5679 			len = sizeof(struct sockaddr_in6);
5680 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5681 			store.sin6.sin6_port = control->port_from;
5682 			break;
5683 #endif
5684 #ifdef INET
5685 		case AF_INET:
5686 #ifdef INET6
5687 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5688 				len = sizeof(struct sockaddr_in6);
5689 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5690 				    &store.sin6);
5691 				store.sin6.sin6_port = control->port_from;
5692 			} else {
5693 				len = sizeof(struct sockaddr_in);
5694 				store.sin = control->whoFrom->ro._l_addr.sin;
5695 				store.sin.sin_port = control->port_from;
5696 			}
5697 #else
5698 			len = sizeof(struct sockaddr_in);
5699 			store.sin = control->whoFrom->ro._l_addr.sin;
5700 			store.sin.sin_port = control->port_from;
5701 #endif
5702 			break;
5703 #endif
5704 		default:
5705 			len = 0;
5706 			break;
5707 		}
5708 		memcpy(from, &store, min((size_t)fromlen, len));
5709 #ifdef INET6
5710 		{
5711 			struct sockaddr_in6 lsa6, *from6;
5712 
5713 			from6 = (struct sockaddr_in6 *)from;
5714 			sctp_recover_scope_mac(from6, (&lsa6));
5715 		}
5716 #endif
5717 	}
5718 	/* now copy out what data we can */
5719 	if (mp == NULL) {
5720 		/* copy out each mbuf in the chain up to length */
5721 get_more_data:
5722 		m = control->data;
5723 		while (m) {
5724 			/* Move out all we can */
5725 			cp_len = (int)uio->uio_resid;
5726 			my_len = (int)SCTP_BUF_LEN(m);
5727 			if (cp_len > my_len) {
5728 				/* not enough in this buf */
5729 				cp_len = my_len;
5730 			}
5731 			if (hold_rlock) {
5732 				SCTP_INP_READ_UNLOCK(inp);
5733 				hold_rlock = 0;
5734 			}
5735 			if (cp_len > 0)
5736 				error = uiomove(mtod(m, char *), cp_len, uio);
5737 			/* re-read */
5738 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5739 				goto release;
5740 			}
5741 			if ((control->do_not_ref_stcb == 0) && stcb &&
5742 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5743 				no_rcv_needed = 1;
5744 			}
5745 			if (error) {
5746 				/* error we are out of here */
5747 				goto release;
5748 			}
5749 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5750 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5751 			    ((control->end_added == 0) ||
5752 			    (control->end_added &&
5753 			    (TAILQ_NEXT(control, next) == NULL)))
5754 			    ) {
5755 				SCTP_INP_READ_LOCK(inp);
5756 				hold_rlock = 1;
5757 			}
5758 			if (cp_len == SCTP_BUF_LEN(m)) {
5759 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5760 				    (control->end_added)) {
5761 					out_flags |= MSG_EOR;
5762 					if ((control->do_not_ref_stcb == 0) &&
5763 					    (control->stcb != NULL) &&
5764 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5765 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5766 				}
5767 				if (control->spec_flags & M_NOTIFICATION) {
5768 					out_flags |= MSG_NOTIFICATION;
5769 				}
5770 				/* we ate up the mbuf */
5771 				if (in_flags & MSG_PEEK) {
5772 					/* just looking */
5773 					m = SCTP_BUF_NEXT(m);
5774 					copied_so_far += cp_len;
5775 				} else {
5776 					/* dispose of the mbuf */
5777 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5778 						sctp_sblog(&so->so_rcv,
5779 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5780 					}
5781 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5782 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5783 						sctp_sblog(&so->so_rcv,
5784 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5785 					}
5786 					copied_so_far += cp_len;
5787 					freed_so_far += cp_len;
5788 					freed_so_far += MSIZE;
5789 					atomic_subtract_int(&control->length, cp_len);
5790 					control->data = sctp_m_free(m);
5791 					m = control->data;
5792 					/*
5793 					 * been through it all, must hold sb
5794 					 * lock ok to null tail
5795 					 */
5796 					if (control->data == NULL) {
5797 #ifdef INVARIANTS
5798 						if ((control->end_added == 0) ||
5799 						    (TAILQ_NEXT(control, next) == NULL)) {
5800 							/*
5801 							 * If the end is not
5802 							 * added, OR the
5803 							 * next is NOT null
5804 							 * we MUST have the
5805 							 * lock.
5806 							 */
5807 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5808 								panic("Hmm we don't own the lock?");
5809 							}
5810 						}
5811 #endif
5812 						control->tail_mbuf = NULL;
5813 #ifdef INVARIANTS
5814 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5815 							panic("end_added, nothing left and no MSG_EOR");
5816 						}
5817 #endif
5818 					}
5819 				}
5820 			} else {
5821 				/* Do we need to trim the mbuf? */
5822 				if (control->spec_flags & M_NOTIFICATION) {
5823 					out_flags |= MSG_NOTIFICATION;
5824 				}
5825 				if ((in_flags & MSG_PEEK) == 0) {
5826 					SCTP_BUF_RESV_UF(m, cp_len);
5827 					SCTP_BUF_LEN(m) -= cp_len;
5828 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5829 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5830 					}
5831 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5832 					if ((control->do_not_ref_stcb == 0) &&
5833 					    stcb) {
5834 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5835 					}
5836 					copied_so_far += cp_len;
5837 					freed_so_far += cp_len;
5838 					freed_so_far += MSIZE;
5839 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5840 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5841 						    SCTP_LOG_SBRESULT, 0);
5842 					}
5843 					atomic_subtract_int(&control->length, cp_len);
5844 				} else {
5845 					copied_so_far += cp_len;
5846 				}
5847 			}
5848 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5849 				break;
5850 			}
5851 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5852 			    (control->do_not_ref_stcb == 0) &&
5853 			    (freed_so_far >= rwnd_req)) {
5854 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5855 			}
5856 		}		/* end while(m) */
5857 		/*
5858 		 * At this point we have looked at it all and we either have
5859 		 * a MSG_EOR/or read all the user wants... <OR>
5860 		 * control->length == 0.
5861 		 */
5862 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5863 			/* we are done with this control */
5864 			if (control->length == 0) {
5865 				if (control->data) {
5866 #ifdef INVARIANTS
5867 					panic("control->data not null at read eor?");
5868 #else
5869 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5870 					sctp_m_freem(control->data);
5871 					control->data = NULL;
5872 #endif
5873 				}
5874 		done_with_control:
5875 				if (TAILQ_NEXT(control, next) == NULL) {
5876 					/*
5877 					 * If we don't have a next we need a
5878 					 * lock, if there is a next
5879 					 * interrupt is filling ahead of us
5880 					 * and we don't need a lock to
5881 					 * remove this guy (which is the
5882 					 * head of the queue).
5883 					 */
5884 					if (hold_rlock == 0) {
5885 						SCTP_INP_READ_LOCK(inp);
5886 						hold_rlock = 1;
5887 					}
5888 				}
5889 				TAILQ_REMOVE(&inp->read_queue, control, next);
5890 				/* Add back any hiddend data */
5891 				if (control->held_length) {
5892 					held_length = 0;
5893 					control->held_length = 0;
5894 					wakeup_read_socket = 1;
5895 				}
5896 				if (control->aux_data) {
5897 					sctp_m_free(control->aux_data);
5898 					control->aux_data = NULL;
5899 				}
5900 				no_rcv_needed = control->do_not_ref_stcb;
5901 				sctp_free_remote_addr(control->whoFrom);
5902 				control->data = NULL;
5903 				sctp_free_a_readq(stcb, control);
5904 				control = NULL;
5905 				if ((freed_so_far >= rwnd_req) &&
5906 				    (no_rcv_needed == 0))
5907 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5908 
5909 			} else {
5910 				/*
5911 				 * The user did not read all of this
5912 				 * message, turn off the returned MSG_EOR
5913 				 * since we are leaving more behind on the
5914 				 * control to read.
5915 				 */
5916 #ifdef INVARIANTS
5917 				if (control->end_added &&
5918 				    (control->data == NULL) &&
5919 				    (control->tail_mbuf == NULL)) {
5920 					panic("Gak, control->length is corrupt?");
5921 				}
5922 #endif
5923 				no_rcv_needed = control->do_not_ref_stcb;
5924 				out_flags &= ~MSG_EOR;
5925 			}
5926 		}
5927 		if (out_flags & MSG_EOR) {
5928 			goto release;
5929 		}
5930 		if ((uio->uio_resid == 0) ||
5931 		    ((in_eeor_mode) &&
5932 		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5933 			goto release;
5934 		}
5935 		/*
5936 		 * If I hit here the receiver wants more and this message is
5937 		 * NOT done (pd-api). So two questions. Can we block? if not
5938 		 * we are done. Did the user NOT set MSG_WAITALL?
5939 		 */
5940 		if (block_allowed == 0) {
5941 			goto release;
5942 		}
5943 		/*
5944 		 * We need to wait for more data a few things: - We don't
5945 		 * sbunlock() so we don't get someone else reading. - We
5946 		 * must be sure to account for the case where what is added
5947 		 * is NOT to our control when we wakeup.
5948 		 */
5949 
5950 		/*
5951 		 * Do we need to tell the transport a rwnd update might be
5952 		 * needed before we go to sleep?
5953 		 */
5954 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5955 		    ((freed_so_far >= rwnd_req) &&
5956 		    (control->do_not_ref_stcb == 0) &&
5957 		    (no_rcv_needed == 0))) {
5958 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5959 		}
5960 wait_some_more:
5961 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5962 			goto release;
5963 		}
5964 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5965 			goto release;
5966 
5967 		if (hold_rlock == 1) {
5968 			SCTP_INP_READ_UNLOCK(inp);
5969 			hold_rlock = 0;
5970 		}
5971 		if (hold_sblock == 0) {
5972 			SOCKBUF_LOCK(&so->so_rcv);
5973 			hold_sblock = 1;
5974 		}
5975 		if ((copied_so_far) && (control->length == 0) &&
5976 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5977 			goto release;
5978 		}
5979 		if (so->so_rcv.sb_cc <= control->held_length) {
5980 			error = sbwait(&so->so_rcv);
5981 			if (error) {
5982 				goto release;
5983 			}
5984 			control->held_length = 0;
5985 		}
5986 		if (hold_sblock) {
5987 			SOCKBUF_UNLOCK(&so->so_rcv);
5988 			hold_sblock = 0;
5989 		}
5990 		if (control->length == 0) {
5991 			/* still nothing here */
5992 			if (control->end_added == 1) {
5993 				/* he aborted, or is done i.e.did a shutdown */
5994 				out_flags |= MSG_EOR;
5995 				if (control->pdapi_aborted) {
5996 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5997 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5998 
5999 					out_flags |= MSG_TRUNC;
6000 				} else {
6001 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6002 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6003 				}
6004 				goto done_with_control;
6005 			}
6006 			if (so->so_rcv.sb_cc > held_length) {
6007 				control->held_length = so->so_rcv.sb_cc;
6008 				held_length = 0;
6009 			}
6010 			goto wait_some_more;
6011 		} else if (control->data == NULL) {
6012 			/*
6013 			 * we must re-sync since data is probably being
6014 			 * added
6015 			 */
6016 			SCTP_INP_READ_LOCK(inp);
6017 			if ((control->length > 0) && (control->data == NULL)) {
6018 				/*
6019 				 * big trouble.. we have the lock and its
6020 				 * corrupt?
6021 				 */
6022 #ifdef INVARIANTS
6023 				panic("Impossible data==NULL length !=0");
6024 #endif
6025 				out_flags |= MSG_EOR;
6026 				out_flags |= MSG_TRUNC;
6027 				control->length = 0;
6028 				SCTP_INP_READ_UNLOCK(inp);
6029 				goto done_with_control;
6030 			}
6031 			SCTP_INP_READ_UNLOCK(inp);
6032 			/* We will fall around to get more data */
6033 		}
6034 		goto get_more_data;
6035 	} else {
6036 		/*-
6037 		 * Give caller back the mbuf chain,
6038 		 * store in uio_resid the length
6039 		 */
6040 		wakeup_read_socket = 0;
6041 		if ((control->end_added == 0) ||
6042 		    (TAILQ_NEXT(control, next) == NULL)) {
6043 			/* Need to get rlock */
6044 			if (hold_rlock == 0) {
6045 				SCTP_INP_READ_LOCK(inp);
6046 				hold_rlock = 1;
6047 			}
6048 		}
6049 		if (control->end_added) {
6050 			out_flags |= MSG_EOR;
6051 			if ((control->do_not_ref_stcb == 0) &&
6052 			    (control->stcb != NULL) &&
6053 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6054 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6055 		}
6056 		if (control->spec_flags & M_NOTIFICATION) {
6057 			out_flags |= MSG_NOTIFICATION;
6058 		}
6059 		uio->uio_resid = control->length;
6060 		*mp = control->data;
6061 		m = control->data;
6062 		while (m) {
6063 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6064 				sctp_sblog(&so->so_rcv,
6065 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6066 			}
6067 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6068 			freed_so_far += SCTP_BUF_LEN(m);
6069 			freed_so_far += MSIZE;
6070 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6071 				sctp_sblog(&so->so_rcv,
6072 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6073 			}
6074 			m = SCTP_BUF_NEXT(m);
6075 		}
6076 		control->data = control->tail_mbuf = NULL;
6077 		control->length = 0;
6078 		if (out_flags & MSG_EOR) {
6079 			/* Done with this control */
6080 			goto done_with_control;
6081 		}
6082 	}
6083 release:
6084 	if (hold_rlock == 1) {
6085 		SCTP_INP_READ_UNLOCK(inp);
6086 		hold_rlock = 0;
6087 	}
6088 	if (hold_sblock == 1) {
6089 		SOCKBUF_UNLOCK(&so->so_rcv);
6090 		hold_sblock = 0;
6091 	}
6092 	sbunlock(&so->so_rcv);
6093 	sockbuf_lock = 0;
6094 
6095 release_unlocked:
6096 	if (hold_sblock) {
6097 		SOCKBUF_UNLOCK(&so->so_rcv);
6098 		hold_sblock = 0;
6099 	}
6100 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6101 		if ((freed_so_far >= rwnd_req) &&
6102 		    (control && (control->do_not_ref_stcb == 0)) &&
6103 		    (no_rcv_needed == 0))
6104 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6105 	}
6106 out:
6107 	if (msg_flags) {
6108 		*msg_flags = out_flags;
6109 	}
6110 	if (((out_flags & MSG_EOR) == 0) &&
6111 	    ((in_flags & MSG_PEEK) == 0) &&
6112 	    (sinfo) &&
6113 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6114 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6115 		struct sctp_extrcvinfo *s_extra;
6116 
6117 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6118 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6119 	}
6120 	if (hold_rlock == 1) {
6121 		SCTP_INP_READ_UNLOCK(inp);
6122 	}
6123 	if (hold_sblock) {
6124 		SOCKBUF_UNLOCK(&so->so_rcv);
6125 	}
6126 	if (sockbuf_lock) {
6127 		sbunlock(&so->so_rcv);
6128 	}
6129 	if (freecnt_applied) {
6130 		/*
6131 		 * The lock on the socket buffer protects us so the free
6132 		 * code will stop. But since we used the socketbuf lock and
6133 		 * the sender uses the tcb_lock to increment, we need to use
6134 		 * the atomic add to the refcnt.
6135 		 */
6136 		if (stcb == NULL) {
6137 #ifdef INVARIANTS
6138 			panic("stcb for refcnt has gone NULL?");
6139 			goto stage_left;
6140 #else
6141 			goto stage_left;
6142 #endif
6143 		}
6144 		atomic_add_int(&stcb->asoc.refcnt, -1);
6145 		/* Save the value back for next time */
6146 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6147 	}
6148 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6149 		if (stcb) {
6150 			sctp_misc_ints(SCTP_SORECV_DONE,
6151 			    freed_so_far,
6152 			    ((uio) ? (slen - uio->uio_resid) : slen),
6153 			    stcb->asoc.my_rwnd,
6154 			    so->so_rcv.sb_cc);
6155 		} else {
6156 			sctp_misc_ints(SCTP_SORECV_DONE,
6157 			    freed_so_far,
6158 			    ((uio) ? (slen - uio->uio_resid) : slen),
6159 			    0,
6160 			    so->so_rcv.sb_cc);
6161 		}
6162 	}
6163 stage_left:
6164 	if (wakeup_read_socket) {
6165 		sctp_sorwakeup(inp, so);
6166 	}
6167 	return (error);
6168 }
6169 
6170 
6171 #ifdef SCTP_MBUF_LOGGING
6172 struct mbuf *
6173 sctp_m_free(struct mbuf *m)
6174 {
6175 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6176 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6177 	}
6178 	return (m_free(m));
6179 }
6180 
6181 void
6182 sctp_m_freem(struct mbuf *mb)
6183 {
6184 	while (mb != NULL)
6185 		mb = sctp_m_free(mb);
6186 }
6187 
6188 #endif
6189 
6190 int
6191 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6192 {
6193 	/*
6194 	 * Given a local address. For all associations that holds the
6195 	 * address, request a peer-set-primary.
6196 	 */
6197 	struct sctp_ifa *ifa;
6198 	struct sctp_laddr *wi;
6199 
6200 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6201 	if (ifa == NULL) {
6202 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6203 		return (EADDRNOTAVAIL);
6204 	}
6205 	/*
6206 	 * Now that we have the ifa we must awaken the iterator with this
6207 	 * message.
6208 	 */
6209 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6210 	if (wi == NULL) {
6211 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6212 		return (ENOMEM);
6213 	}
6214 	/* Now incr the count and int wi structure */
6215 	SCTP_INCR_LADDR_COUNT();
6216 	bzero(wi, sizeof(*wi));
6217 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6218 	wi->ifa = ifa;
6219 	wi->action = SCTP_SET_PRIM_ADDR;
6220 	atomic_add_int(&ifa->refcount, 1);
6221 
6222 	/* Now add it to the work queue */
6223 	SCTP_WQ_ADDR_LOCK();
6224 	/*
6225 	 * Should this really be a tailq? As it is we will process the
6226 	 * newest first :-0
6227 	 */
6228 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6229 	SCTP_WQ_ADDR_UNLOCK();
6230 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6231 	    (struct sctp_inpcb *)NULL,
6232 	    (struct sctp_tcb *)NULL,
6233 	    (struct sctp_nets *)NULL);
6234 	return (0);
6235 }
6236 
6237 
6238 int
6239 sctp_soreceive(struct socket *so,
6240     struct sockaddr **psa,
6241     struct uio *uio,
6242     struct mbuf **mp0,
6243     struct mbuf **controlp,
6244     int *flagsp)
6245 {
6246 	int error, fromlen;
6247 	uint8_t sockbuf[256];
6248 	struct sockaddr *from;
6249 	struct sctp_extrcvinfo sinfo;
6250 	int filling_sinfo = 1;
6251 	struct sctp_inpcb *inp;
6252 
6253 	inp = (struct sctp_inpcb *)so->so_pcb;
6254 	/* pickup the assoc we are reading from */
6255 	if (inp == NULL) {
6256 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6257 		return (EINVAL);
6258 	}
6259 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6260 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6261 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6262 	    (controlp == NULL)) {
6263 		/* user does not want the sndrcv ctl */
6264 		filling_sinfo = 0;
6265 	}
6266 	if (psa) {
6267 		from = (struct sockaddr *)sockbuf;
6268 		fromlen = sizeof(sockbuf);
6269 		from->sa_len = 0;
6270 	} else {
6271 		from = NULL;
6272 		fromlen = 0;
6273 	}
6274 
6275 	if (filling_sinfo) {
6276 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6277 	}
6278 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6279 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6280 	if (controlp != NULL) {
6281 		/* copy back the sinfo in a CMSG format */
6282 		if (filling_sinfo)
6283 			*controlp = sctp_build_ctl_nchunk(inp,
6284 			    (struct sctp_sndrcvinfo *)&sinfo);
6285 		else
6286 			*controlp = NULL;
6287 	}
6288 	if (psa) {
6289 		/* copy back the address info */
6290 		if (from && from->sa_len) {
6291 			*psa = sodupsockaddr(from, M_NOWAIT);
6292 		} else {
6293 			*psa = NULL;
6294 		}
6295 	}
6296 	return (error);
6297 }
6298 
6299 
6300 
6301 
6302 
6303 int
6304 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6305     int totaddr, int *error)
6306 {
6307 	int added = 0;
6308 	int i;
6309 	struct sctp_inpcb *inp;
6310 	struct sockaddr *sa;
6311 	size_t incr = 0;
6312 
6313 #ifdef INET
6314 	struct sockaddr_in *sin;
6315 
6316 #endif
6317 #ifdef INET6
6318 	struct sockaddr_in6 *sin6;
6319 
6320 #endif
6321 
6322 	sa = addr;
6323 	inp = stcb->sctp_ep;
6324 	*error = 0;
6325 	for (i = 0; i < totaddr; i++) {
6326 		switch (sa->sa_family) {
6327 #ifdef INET
6328 		case AF_INET:
6329 			incr = sizeof(struct sockaddr_in);
6330 			sin = (struct sockaddr_in *)sa;
6331 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6332 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6333 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6334 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6335 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6336 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6337 				*error = EINVAL;
6338 				goto out_now;
6339 			}
6340 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6341 				/* assoc gone no un-lock */
6342 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6343 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6344 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6345 				*error = ENOBUFS;
6346 				goto out_now;
6347 			}
6348 			added++;
6349 			break;
6350 #endif
6351 #ifdef INET6
6352 		case AF_INET6:
6353 			incr = sizeof(struct sockaddr_in6);
6354 			sin6 = (struct sockaddr_in6 *)sa;
6355 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6356 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6357 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6358 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6359 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6360 				*error = EINVAL;
6361 				goto out_now;
6362 			}
6363 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6364 				/* assoc gone no un-lock */
6365 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6366 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6367 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6368 				*error = ENOBUFS;
6369 				goto out_now;
6370 			}
6371 			added++;
6372 			break;
6373 #endif
6374 		default:
6375 			break;
6376 		}
6377 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6378 	}
6379 out_now:
6380 	return (added);
6381 }
6382 
6383 struct sctp_tcb *
6384 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6385     int *totaddr, int *num_v4, int *num_v6, int *error,
6386     int limit, int *bad_addr)
6387 {
6388 	struct sockaddr *sa;
6389 	struct sctp_tcb *stcb = NULL;
6390 	size_t incr, at, i;
6391 
6392 	at = incr = 0;
6393 	sa = addr;
6394 
6395 	*error = *num_v6 = *num_v4 = 0;
6396 	/* account and validate addresses */
6397 	for (i = 0; i < (size_t)*totaddr; i++) {
6398 		switch (sa->sa_family) {
6399 #ifdef INET
6400 		case AF_INET:
6401 			(*num_v4) += 1;
6402 			incr = sizeof(struct sockaddr_in);
6403 			if (sa->sa_len != incr) {
6404 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6405 				*error = EINVAL;
6406 				*bad_addr = 1;
6407 				return (NULL);
6408 			}
6409 			break;
6410 #endif
6411 #ifdef INET6
6412 		case AF_INET6:
6413 			{
6414 				struct sockaddr_in6 *sin6;
6415 
6416 				sin6 = (struct sockaddr_in6 *)sa;
6417 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6418 					/* Must be non-mapped for connectx */
6419 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6420 					*error = EINVAL;
6421 					*bad_addr = 1;
6422 					return (NULL);
6423 				}
6424 				(*num_v6) += 1;
6425 				incr = sizeof(struct sockaddr_in6);
6426 				if (sa->sa_len != incr) {
6427 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6428 					*error = EINVAL;
6429 					*bad_addr = 1;
6430 					return (NULL);
6431 				}
6432 				break;
6433 			}
6434 #endif
6435 		default:
6436 			*totaddr = i;
6437 			/* we are done */
6438 			break;
6439 		}
6440 		if (i == (size_t)*totaddr) {
6441 			break;
6442 		}
6443 		SCTP_INP_INCR_REF(inp);
6444 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6445 		if (stcb != NULL) {
6446 			/* Already have or am bring up an association */
6447 			return (stcb);
6448 		} else {
6449 			SCTP_INP_DECR_REF(inp);
6450 		}
6451 		if ((at + incr) > (size_t)limit) {
6452 			*totaddr = i;
6453 			break;
6454 		}
6455 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6456 	}
6457 	return ((struct sctp_tcb *)NULL);
6458 }
6459 
/*
 * sctp_bindx(ADD) for one address.
 * Assumes all arguments are valid/checked by the caller; failures are
 * reported by storing an errno value in *error (it is left untouched on
 * success paths that return early).
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;	/* address actually bound (may be a v4-mapped conversion) */

#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;	/* scratch space for v4-mapped -> v4 conversion */

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* A bound-all endpoint cannot add individual addresses. */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Convert to a plain v4 address and bind that. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* Endpoint not yet bound at all: do a normal bind instead. */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		/*
		 * NOTE(review): addr_touse may still point at a
		 * sockaddr_in6 here; this cast relies on sin_port and
		 * sin6_port occupying the same offset — confirm.
		 */
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* Is some other endpoint already bound to this address? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Free: clear the port and add the address to this ep. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6591 
/*
 * sctp_bindx(DELETE) for one address.
 * Assumes all arguments are valid/checked by the caller; failures are
 * reported by storing an errno value in *error.
 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;	/* address actually deleted (may be a v4-mapped conversion) */

#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;	/* scratch space for v4-mapped -> v4 conversion */

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* A bound-all endpoint has no individual addresses to delete. */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Convert to a plain v4 address and delete that. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6681 
/*
 * Returns the count of local addresses usable by the association, taking
 * into account all scoping rules (loopback, private-v4, link-local and
 * site-local v6) and restricted-address lists.
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope;

#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;

#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;

#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Snapshot the association's scope flags. */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			/* Skip loopback interfaces unless in loopback scope. */
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* Skip addresses not visible inside this jail. */
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							continue;
						}
						/* Skip private addresses outside local scope. */
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						/* Skip addresses not visible inside this jail. */
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad
									 * link-local
									 * address
									 */
									continue;
							}
						}
						/* Skip site-local addresses outside site scope. */
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case: count the endpoint's bound address
		 * list, minus any addresses restricted for this assoc.
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6831 
6832 #if defined(SCTP_LOCAL_TRACE_BUF)
6833 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	/*
	 * Record one entry in the global SCTP trace ring buffer.
	 * Slots are reserved lock-free: the CAS loop advances the shared
	 * index, and the pre-CAS value ('saveindex') selects the slot
	 * this writer fills.
	 */
	uint32_t saveindex, newindex;

	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			/* Wrap: the next writer after us starts at slot 1. */
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		/* The wrapping writer itself reuses slot 0. */
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6859 
6860 #endif
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	/*
	 * Handle a UDP-encapsulated SCTP packet: strip the UDP header and
	 * re-inject the packet into normal SCTP input, remembering the
	 * UDP source port.  'off' is the offset of the UDP header from
	 * the start of the IP header.  NOTE(review): presumably registered
	 * as a UDP tunneling callback elsewhere — confirm against the
	 * setup code.  The mbuf is consumed on all paths.
	 */
	struct ip *iph;

#ifdef INET6
	struct ip6_hdr *ip6;

#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;	/* kept in network byte order */
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain: append sp after the IP header part. */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* Shrink the IP length to account for the removed UDP header. */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		/* Shrink the payload length to account for the removed UDP header. */
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6933 
/*
 * Tear down the SCTP-over-UDP tunneling sockets, if any exist.
 * The sysctl caller must hold sctp_sysctl_info_lock() for writing.
 */
void
sctp_over_udp_stop(void)
{
#ifdef INET
	struct socket *so4;
#endif
#ifdef INET6
	struct socket *so6;
#endif

#ifdef INET
	so4 = SCTP_BASE_INFO(udp4_tun_socket);
	if (so4 != NULL) {
		soclose(so4);
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	so6 = SCTP_BASE_INFO(udp6_tun_socket);
	if (so6 != NULL) {
		soclose(so6);
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
6954 
/*
 * Create, hook up, and bind the kernel UDP sockets used for SCTP-over-UDP
 * encapsulation (one per compiled-in address family).  Incoming packets
 * on these sockets are delivered to sctp_recv_udp_tunneled_packet().
 *
 * Returns 0 on success or an errno value.  On any failure, every
 * partially-created socket is torn down via sctp_over_udp_stop() before
 * returning, so the function never leaves half-initialized state behind.
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;

#ifdef INET
	struct sockaddr_in sin;

#endif
#ifdef INET6
	struct sockaddr_in6 sin6;

#endif
	/*
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	/*
	 * NOTE(review): 'port' is passed through htons() when binding
	 * below, which suggests the sysctl value is in host byte order;
	 * the ntohs() in this zero check is then redundant (harmless,
	 * since ntohs(x) == 0 iff x == 0) — confirm and simplify.
	 */
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	/* Create the IPv4 tunneling socket. */
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook to get tunneled packets delivered. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet, NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	/* Same three steps for the IPv6 tunneling socket. */
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet, NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}
7040