xref: /freebsd/sys/netinet/sctputil.c (revision 119b75925c562202145d7bac7b676b98029c6cb9)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55 
56 
57 #ifndef KTR_SCTP
58 #define KTR_SCTP KTR_SUBSYS
59 #endif
60 
61 extern struct sctp_cc_functions sctp_cc_functions[];
62 extern struct sctp_ss_functions sctp_ss_functions[];
63 
64 void
65 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
66 {
67 	struct sctp_cwnd_log sctp_clog;
68 
69 	sctp_clog.x.sb.stcb = stcb;
70 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
71 	if (stcb)
72 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
73 	else
74 		sctp_clog.x.sb.stcb_sbcc = 0;
75 	sctp_clog.x.sb.incr = incr;
76 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
77 	    SCTP_LOG_EVENT_SB,
78 	    from,
79 	    sctp_clog.x.misc.log1,
80 	    sctp_clog.x.misc.log2,
81 	    sctp_clog.x.misc.log3,
82 	    sctp_clog.x.misc.log4);
83 }
84 
85 void
86 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
87 {
88 	struct sctp_cwnd_log sctp_clog;
89 
90 	sctp_clog.x.close.inp = (void *)inp;
91 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
92 	if (stcb) {
93 		sctp_clog.x.close.stcb = (void *)stcb;
94 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
95 	} else {
96 		sctp_clog.x.close.stcb = 0;
97 		sctp_clog.x.close.state = 0;
98 	}
99 	sctp_clog.x.close.loc = loc;
100 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
101 	    SCTP_LOG_EVENT_CLOSE,
102 	    0,
103 	    sctp_clog.x.misc.log1,
104 	    sctp_clog.x.misc.log2,
105 	    sctp_clog.x.misc.log3,
106 	    sctp_clog.x.misc.log4);
107 }
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 }
125 
126 void
127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128 {
129 	struct sctp_cwnd_log sctp_clog;
130 
131 	sctp_clog.x.strlog.stcb = stcb;
132 	sctp_clog.x.strlog.n_tsn = tsn;
133 	sctp_clog.x.strlog.n_sseq = sseq;
134 	sctp_clog.x.strlog.e_tsn = 0;
135 	sctp_clog.x.strlog.e_sseq = 0;
136 	sctp_clog.x.strlog.strm = stream;
137 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138 	    SCTP_LOG_EVENT_STRM,
139 	    from,
140 	    sctp_clog.x.misc.log1,
141 	    sctp_clog.x.misc.log2,
142 	    sctp_clog.x.misc.log3,
143 	    sctp_clog.x.misc.log4);
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 void
166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167 {
168 	struct sctp_cwnd_log sctp_clog;
169 
170 	sctp_clog.x.sack.cumack = cumack;
171 	sctp_clog.x.sack.oldcumack = old_cumack;
172 	sctp_clog.x.sack.tsn = tsn;
173 	sctp_clog.x.sack.numGaps = gaps;
174 	sctp_clog.x.sack.numDups = dups;
175 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176 	    SCTP_LOG_EVENT_SACK,
177 	    from,
178 	    sctp_clog.x.misc.log1,
179 	    sctp_clog.x.misc.log2,
180 	    sctp_clog.x.misc.log3,
181 	    sctp_clog.x.misc.log4);
182 }
183 
184 void
185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186 {
187 	struct sctp_cwnd_log sctp_clog;
188 
189 	memset(&sctp_clog, 0, sizeof(sctp_clog));
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
204 {
205 	struct sctp_cwnd_log sctp_clog;
206 
207 	memset(&sctp_clog, 0, sizeof(sctp_clog));
208 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210 	sctp_clog.x.fr.tsn = tsn;
211 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212 	    SCTP_LOG_EVENT_FR,
213 	    from,
214 	    sctp_clog.x.misc.log1,
215 	    sctp_clog.x.misc.log2,
216 	    sctp_clog.x.misc.log3,
217 	    sctp_clog.x.misc.log4);
218 }
219 
220 #ifdef SCTP_MBUF_LOGGING
221 void
222 sctp_log_mb(struct mbuf *m, int from)
223 {
224 	struct sctp_cwnd_log sctp_clog;
225 
226 	sctp_clog.x.mb.mp = m;
227 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
228 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
229 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
230 	if (SCTP_BUF_IS_EXTENDED(m)) {
231 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
232 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
233 	} else {
234 		sctp_clog.x.mb.ext = 0;
235 		sctp_clog.x.mb.refcnt = 0;
236 	}
237 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
238 	    SCTP_LOG_EVENT_MBUF,
239 	    from,
240 	    sctp_clog.x.misc.log1,
241 	    sctp_clog.x.misc.log2,
242 	    sctp_clog.x.misc.log3,
243 	    sctp_clog.x.misc.log4);
244 }
245 
246 void
247 sctp_log_mbc(struct mbuf *m, int from)
248 {
249 	struct mbuf *mat;
250 
251 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
252 		sctp_log_mb(mat, from);
253 	}
254 }
255 
256 #endif
257 
258 void
259 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
260 {
261 	struct sctp_cwnd_log sctp_clog;
262 
263 	if (control == NULL) {
264 		SCTP_PRINTF("Gak log of NULL?\n");
265 		return;
266 	}
267 	sctp_clog.x.strlog.stcb = control->stcb;
268 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
269 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
270 	sctp_clog.x.strlog.strm = control->sinfo_stream;
271 	if (poschk != NULL) {
272 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
273 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
274 	} else {
275 		sctp_clog.x.strlog.e_tsn = 0;
276 		sctp_clog.x.strlog.e_sseq = 0;
277 	}
278 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
279 	    SCTP_LOG_EVENT_STRM,
280 	    from,
281 	    sctp_clog.x.misc.log1,
282 	    sctp_clog.x.misc.log2,
283 	    sctp_clog.x.misc.log3,
284 	    sctp_clog.x.misc.log4);
285 }
286 
287 void
288 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
289 {
290 	struct sctp_cwnd_log sctp_clog;
291 
292 	sctp_clog.x.cwnd.net = net;
293 	if (stcb->asoc.send_queue_cnt > 255)
294 		sctp_clog.x.cwnd.cnt_in_send = 255;
295 	else
296 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
297 	if (stcb->asoc.stream_queue_cnt > 255)
298 		sctp_clog.x.cwnd.cnt_in_str = 255;
299 	else
300 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
301 
302 	if (net) {
303 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
304 		sctp_clog.x.cwnd.inflight = net->flight_size;
305 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
306 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
307 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
308 	}
309 	if (SCTP_CWNDLOG_PRESEND == from) {
310 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
311 	}
312 	sctp_clog.x.cwnd.cwnd_augment = augment;
313 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
314 	    SCTP_LOG_EVENT_CWND,
315 	    from,
316 	    sctp_clog.x.misc.log1,
317 	    sctp_clog.x.misc.log2,
318 	    sctp_clog.x.misc.log3,
319 	    sctp_clog.x.misc.log4);
320 }
321 
/*
 * Trace the ownership state of the SCTP locks relevant to 'inp'/'stcb'
 * at the call site.  Both parameters may be NULL; unknown states are
 * recorded as SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx — presumably because SOCK_LOCK aliases the
		 * receive-buffer mutex here; confirm against sys/socketvar.h.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
365 
366 void
367 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
368 {
369 	struct sctp_cwnd_log sctp_clog;
370 
371 	memset(&sctp_clog, 0, sizeof(sctp_clog));
372 	sctp_clog.x.cwnd.net = net;
373 	sctp_clog.x.cwnd.cwnd_new_value = error;
374 	sctp_clog.x.cwnd.inflight = net->flight_size;
375 	sctp_clog.x.cwnd.cwnd_augment = burst;
376 	if (stcb->asoc.send_queue_cnt > 255)
377 		sctp_clog.x.cwnd.cnt_in_send = 255;
378 	else
379 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
380 	if (stcb->asoc.stream_queue_cnt > 255)
381 		sctp_clog.x.cwnd.cnt_in_str = 255;
382 	else
383 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
384 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
385 	    SCTP_LOG_EVENT_MAXBURST,
386 	    from,
387 	    sctp_clog.x.misc.log1,
388 	    sctp_clog.x.misc.log2,
389 	    sctp_clog.x.misc.log3,
390 	    sctp_clog.x.misc.log4);
391 }
392 
393 void
394 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
395 {
396 	struct sctp_cwnd_log sctp_clog;
397 
398 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
399 	sctp_clog.x.rwnd.send_size = snd_size;
400 	sctp_clog.x.rwnd.overhead = overhead;
401 	sctp_clog.x.rwnd.new_rwnd = 0;
402 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
403 	    SCTP_LOG_EVENT_RWND,
404 	    from,
405 	    sctp_clog.x.misc.log1,
406 	    sctp_clog.x.misc.log2,
407 	    sctp_clog.x.misc.log3,
408 	    sctp_clog.x.misc.log4);
409 }
410 
411 void
412 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
413 {
414 	struct sctp_cwnd_log sctp_clog;
415 
416 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
417 	sctp_clog.x.rwnd.send_size = flight_size;
418 	sctp_clog.x.rwnd.overhead = overhead;
419 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
420 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
421 	    SCTP_LOG_EVENT_RWND,
422 	    from,
423 	    sctp_clog.x.misc.log1,
424 	    sctp_clog.x.misc.log2,
425 	    sctp_clog.x.misc.log3,
426 	    sctp_clog.x.misc.log4);
427 }
428 
429 #ifdef SCTP_MBCNT_LOGGING
430 static void
431 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
432 {
433 	struct sctp_cwnd_log sctp_clog;
434 
435 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
436 	sctp_clog.x.mbcnt.size_change = book;
437 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
438 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
439 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
440 	    SCTP_LOG_EVENT_MBCNT,
441 	    from,
442 	    sctp_clog.x.misc.log1,
443 	    sctp_clog.x.misc.log2,
444 	    sctp_clog.x.misc.log3,
445 	    sctp_clog.x.misc.log4);
446 }
447 
448 #endif
449 
/*
 * Trace four caller-supplied 32-bit values under SCTP_LOG_MISC_EVENT;
 * the meaning of a..d depends entirely on the call site ('from').
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
458 
459 void
460 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
461 {
462 	struct sctp_cwnd_log sctp_clog;
463 
464 	sctp_clog.x.wake.stcb = (void *)stcb;
465 	sctp_clog.x.wake.wake_cnt = wake_cnt;
466 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
467 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
468 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
469 
470 	if (stcb->asoc.stream_queue_cnt < 0xff)
471 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
472 	else
473 		sctp_clog.x.wake.stream_qcnt = 0xff;
474 
475 	if (stcb->asoc.chunks_on_out_queue < 0xff)
476 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
477 	else
478 		sctp_clog.x.wake.chunks_on_oque = 0xff;
479 
480 	sctp_clog.x.wake.sctpflags = 0;
481 	/* set in the defered mode stuff */
482 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
483 		sctp_clog.x.wake.sctpflags |= 1;
484 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
485 		sctp_clog.x.wake.sctpflags |= 2;
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
487 		sctp_clog.x.wake.sctpflags |= 4;
488 	/* what about the sb */
489 	if (stcb->sctp_socket) {
490 		struct socket *so = stcb->sctp_socket;
491 
492 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
493 	} else {
494 		sctp_clog.x.wake.sbflags = 0xff;
495 	}
496 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
497 	    SCTP_LOG_EVENT_WAKE,
498 	    from,
499 	    sctp_clog.x.misc.log1,
500 	    sctp_clog.x.misc.log2,
501 	    sctp_clog.x.misc.log3,
502 	    sctp_clog.x.misc.log4);
503 }
504 
505 void
506 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
507 {
508 	struct sctp_cwnd_log sctp_clog;
509 
510 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
511 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
512 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
513 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
514 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
515 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
516 	sctp_clog.x.blk.sndlen = sendlen;
517 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
518 	    SCTP_LOG_EVENT_BLOCK,
519 	    from,
520 	    sctp_clog.x.misc.log1,
521 	    sctp_clog.x.misc.log2,
522 	    sctp_clog.x.misc.log3,
523 	    sctp_clog.x.misc.log4);
524 }
525 
/*
 * Stub for the SCTP_GET_STAT_LOG socket option: logging is retrieved
 * via ktrdump(8) on this platform, so there is nothing to copy out.
 * Always returns 0 (success).
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
532 
533 #ifdef SCTP_AUDITING_ENABLED
534 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
535 static int sctp_audit_indx = 0;
536 
537 static
538 void
539 sctp_print_audit_report(void)
540 {
541 	int i;
542 	int cnt;
543 
544 	cnt = 0;
545 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
546 		if ((sctp_audit_data[i][0] == 0xe0) &&
547 		    (sctp_audit_data[i][1] == 0x01)) {
548 			cnt = 0;
549 			SCTP_PRINTF("\n");
550 		} else if (sctp_audit_data[i][0] == 0xf0) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
554 		    (sctp_audit_data[i][1] == 0x01)) {
555 			SCTP_PRINTF("\n");
556 			cnt = 0;
557 		}
558 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
559 		    (uint32_t) sctp_audit_data[i][1]);
560 		cnt++;
561 		if ((cnt % 14) == 0)
562 			SCTP_PRINTF("\n");
563 	}
564 	for (i = 0; i < sctp_audit_indx; i++) {
565 		if ((sctp_audit_data[i][0] == 0xe0) &&
566 		    (sctp_audit_data[i][1] == 0x01)) {
567 			cnt = 0;
568 			SCTP_PRINTF("\n");
569 		} else if (sctp_audit_data[i][0] == 0xf0) {
570 			cnt = 0;
571 			SCTP_PRINTF("\n");
572 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
573 		    (sctp_audit_data[i][1] == 0x01)) {
574 			SCTP_PRINTF("\n");
575 			cnt = 0;
576 		}
577 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
578 		    (uint32_t) sctp_audit_data[i][1]);
579 		cnt++;
580 		if ((cnt % 14) == 0)
581 			SCTP_PRINTF("\n");
582 	}
583 	SCTP_PRINTF("\n");
584 }
585 
/*
 * Audit the association's retransmission and flight-size bookkeeping
 * against the actual contents of the sent queue.  Each step appends a
 * record to the circular sctp_audit_data buffer; mismatches are
 * corrected in place (the queue-derived values win) and, if any were
 * found, the full audit log is printed.
 *
 * 'from' tags the call site; 'net' is currently unused here.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* Record entry into the audit (0xAA, caller id). */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	/* 0xAF/0x01: no endpoint to audit. */
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xAF/0x02: no association to audit. */
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: snapshot of the claimed retransmission count. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recount retransmits and in-flight bytes from the sent queue. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	/* 0xAF/0xA1: retran count mismatch — correct and log (0xA2). */
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	/* 0xAF/0xA2: total flight (bytes) mismatch — correct it. */
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	/* 0xAF/0xA5: flight chunk-count mismatch — correct it. */
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: per-destination flight sizes must sum to the total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	/* 0xAF/0xA3: per-net sum disagrees — rebuild each net's flight. */
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	/* Any correction triggers a full dump of the audit buffer. */
	if (rep) {
		sctp_print_audit_report();
	}
}
715 
716 void
717 sctp_audit_log(uint8_t ev, uint8_t fd)
718 {
719 
720 	sctp_audit_data[sctp_audit_indx][0] = ev;
721 	sctp_audit_data[sctp_audit_indx][1] = fd;
722 	sctp_audit_indx++;
723 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
724 		sctp_audit_indx = 0;
725 	}
726 }
727 
728 #endif
729 
730 /*
731  * sctp_stop_timers_for_shutdown() should be called
732  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
733  * state to make sure that all timers are stopped.
734  */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	/* Stop the association-wide timers ... */
	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	/* ... then the per-destination PMTU and heartbeat timers. */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
	}
}
753 
754 /*
755  * a list of sizes based on typical mtu's, used only if next hop size not
756  * returned.
757  */
/*
 * NOTE: entries must stay sorted in strictly ascending order;
 * sctp_get_prev_mtu()/sctp_get_next_mtu() scan the table relying on it.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
778 
779 /*
780  * Return the largest MTU smaller than val. If there is no
781  * entry, just return val.
782  */
783 uint32_t
784 sctp_get_prev_mtu(uint32_t val)
785 {
786 	uint32_t i;
787 
788 	if (val <= sctp_mtu_sizes[0]) {
789 		return (val);
790 	}
791 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
792 		if (val <= sctp_mtu_sizes[i]) {
793 			break;
794 		}
795 	}
796 	return (sctp_mtu_sizes[i - 1]);
797 }
798 
799 /*
800  * Return the smallest MTU larger than val. If there is no
801  * entry, just return val.
802  */
803 uint32_t
804 sctp_get_next_mtu(uint32_t val)
805 {
806 	/* select another MTU that is just bigger than this one */
807 	uint32_t i;
808 
809 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
810 		if (val < sctp_mtu_sizes[i]) {
811 			return (sctp_mtu_sizes[i]);
812 		}
813 	}
814 	return (val);
815 }
816 
/*
 * Refill the endpoint's random_store pool by HMACing the endpoint's
 * random_numbers with an incrementing counter, and reset store_at so
 * sctp_select_initial_TSN() starts consuming from the beginning.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
835 
836 uint32_t
837 sctp_select_initial_TSN(struct sctp_pcb *inp)
838 {
839 	/*
840 	 * A true implementation should use random selection process to get
841 	 * the initial stream sequence number, using RFC1750 as a good
842 	 * guideline
843 	 */
844 	uint32_t x, *xp;
845 	uint8_t *p;
846 	int store_at, new_store;
847 
848 	if (inp->initial_sequence_debug != 0) {
849 		uint32_t ret;
850 
851 		ret = inp->initial_sequence_debug;
852 		inp->initial_sequence_debug++;
853 		return (ret);
854 	}
855 retry:
856 	store_at = inp->store_at;
857 	new_store = store_at + sizeof(uint32_t);
858 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
859 		new_store = 0;
860 	}
861 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
862 		goto retry;
863 	}
864 	if (new_store == 0) {
865 		/* Refill the random store */
866 		sctp_fill_random_store(inp);
867 	}
868 	p = &inp->random_store[store_at];
869 	xp = (uint32_t *) p;
870 	x = *xp;
871 	return (x);
872 }
873 
874 uint32_t
875 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
876 {
877 	uint32_t x;
878 	struct timeval now;
879 
880 	if (check) {
881 		(void)SCTP_GETTIME_TIMEVAL(&now);
882 	}
883 	for (;;) {
884 		x = sctp_select_initial_TSN(&inp->sctp_ep);
885 		if (x == 0) {
886 			/* we never use 0 */
887 			continue;
888 		}
889 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
890 			break;
891 		}
892 	}
893 	return (x);
894 }
895 
896 int
897 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
898     uint32_t override_tag, uint32_t vrf_id)
899 {
900 	struct sctp_association *asoc;
901 
902 	/*
903 	 * Anything set to zero is taken care of by the allocation routine's
904 	 * bzero
905 	 */
906 
907 	/*
908 	 * Up front select what scoping to apply on addresses I tell my peer
909 	 * Not sure what to do with these right now, we will need to come up
910 	 * with a way to set them. We may need to pass them through from the
911 	 * caller in the sctp_aloc_assoc() function.
912 	 */
913 	int i;
914 
915 #if defined(SCTP_DETAILED_STR_STATS)
916 	int j;
917 
918 #endif
919 
920 	asoc = &stcb->asoc;
921 	/* init all variables to a known value. */
922 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
923 	asoc->max_burst = inp->sctp_ep.max_burst;
924 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
925 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
926 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
927 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
928 	asoc->ecn_supported = inp->ecn_supported;
929 	asoc->prsctp_supported = inp->prsctp_supported;
930 	asoc->auth_supported = inp->auth_supported;
931 	asoc->asconf_supported = inp->asconf_supported;
932 	asoc->reconfig_supported = inp->reconfig_supported;
933 	asoc->nrsack_supported = inp->nrsack_supported;
934 	asoc->pktdrop_supported = inp->pktdrop_supported;
935 	asoc->sctp_cmt_pf = (uint8_t) 0;
936 	asoc->sctp_frag_point = inp->sctp_frag_point;
937 	asoc->sctp_features = inp->sctp_features;
938 	asoc->default_dscp = inp->sctp_ep.default_dscp;
939 	asoc->max_cwnd = inp->max_cwnd;
940 #ifdef INET6
941 	if (inp->sctp_ep.default_flowlabel) {
942 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
943 	} else {
944 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
945 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
946 			asoc->default_flowlabel &= 0x000fffff;
947 			asoc->default_flowlabel |= 0x80000000;
948 		} else {
949 			asoc->default_flowlabel = 0;
950 		}
951 	}
952 #endif
953 	asoc->sb_send_resv = 0;
954 	if (override_tag) {
955 		asoc->my_vtag = override_tag;
956 	} else {
957 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
958 	}
959 	/* Get the nonce tags */
960 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
961 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
962 	asoc->vrf_id = vrf_id;
963 
964 #ifdef SCTP_ASOCLOG_OF_TSNS
965 	asoc->tsn_in_at = 0;
966 	asoc->tsn_out_at = 0;
967 	asoc->tsn_in_wrapped = 0;
968 	asoc->tsn_out_wrapped = 0;
969 	asoc->cumack_log_at = 0;
970 	asoc->cumack_log_atsnt = 0;
971 #endif
972 #ifdef SCTP_FS_SPEC_LOG
973 	asoc->fs_index = 0;
974 #endif
975 	asoc->refcnt = 0;
976 	asoc->assoc_up_sent = 0;
977 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
978 	    sctp_select_initial_TSN(&inp->sctp_ep);
979 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
980 	/* we are optimisitic here */
981 	asoc->peer_supports_nat = 0;
982 	asoc->sent_queue_retran_cnt = 0;
983 
984 	/* for CMT */
985 	asoc->last_net_cmt_send_started = NULL;
986 
987 	/* This will need to be adjusted */
988 	asoc->last_acked_seq = asoc->init_seq_number - 1;
989 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
990 	asoc->asconf_seq_in = asoc->last_acked_seq;
991 
992 	/* here we are different, we hold the next one we expect */
993 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
994 
995 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
996 	asoc->initial_rto = inp->sctp_ep.initial_rto;
997 
998 	asoc->max_init_times = inp->sctp_ep.max_init_times;
999 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1000 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1001 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1002 	asoc->free_chunk_cnt = 0;
1003 
1004 	asoc->iam_blocking = 0;
1005 	asoc->context = inp->sctp_context;
1006 	asoc->local_strreset_support = inp->local_strreset_support;
1007 	asoc->def_send = inp->def_send;
1008 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1009 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1010 	asoc->pr_sctp_cnt = 0;
1011 	asoc->total_output_queue_size = 0;
1012 
1013 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1014 		asoc->scope.ipv6_addr_legal = 1;
1015 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1016 			asoc->scope.ipv4_addr_legal = 1;
1017 		} else {
1018 			asoc->scope.ipv4_addr_legal = 0;
1019 		}
1020 	} else {
1021 		asoc->scope.ipv6_addr_legal = 0;
1022 		asoc->scope.ipv4_addr_legal = 1;
1023 	}
1024 
1025 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1026 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1027 
1028 	asoc->smallest_mtu = inp->sctp_frag_point;
1029 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1030 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1031 
1032 	asoc->locked_on_sending = NULL;
1033 	asoc->stream_locked_on = 0;
1034 	asoc->ecn_echo_cnt_onq = 0;
1035 	asoc->stream_locked = 0;
1036 
1037 	asoc->send_sack = 1;
1038 
1039 	LIST_INIT(&asoc->sctp_restricted_addrs);
1040 
1041 	TAILQ_INIT(&asoc->nets);
1042 	TAILQ_INIT(&asoc->pending_reply_queue);
1043 	TAILQ_INIT(&asoc->asconf_ack_sent);
1044 	/* Setup to fill the hb random cache at first HB */
1045 	asoc->hb_random_idx = 4;
1046 
1047 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1048 
1049 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1050 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1051 
1052 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1053 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1054 
1055 	/*
1056 	 * Now the stream parameters, here we allocate space for all streams
1057 	 * that we request by default.
1058 	 */
1059 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1060 	    inp->sctp_ep.pre_open_stream_count;
1061 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1062 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1063 	    SCTP_M_STRMO);
1064 	if (asoc->strmout == NULL) {
1065 		/* big trouble no memory */
1066 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1067 		return (ENOMEM);
1068 	}
1069 	for (i = 0; i < asoc->streamoutcnt; i++) {
1070 		/*
1071 		 * inbound side must be set to 0xffff, also NOTE when we get
1072 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1073 		 * count (streamoutcnt) but first check if we sent to any of
1074 		 * the upper streams that were dropped (if some were). Those
1075 		 * that were dropped must be notified to the upper layer as
1076 		 * failed to send.
1077 		 */
1078 		asoc->strmout[i].next_sequence_send = 0x0;
1079 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1080 		asoc->strmout[i].chunks_on_queues = 0;
1081 #if defined(SCTP_DETAILED_STR_STATS)
1082 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1083 			asoc->strmout[i].abandoned_sent[j] = 0;
1084 			asoc->strmout[i].abandoned_unsent[j] = 0;
1085 		}
1086 #else
1087 		asoc->strmout[i].abandoned_sent[0] = 0;
1088 		asoc->strmout[i].abandoned_unsent[0] = 0;
1089 #endif
1090 		asoc->strmout[i].stream_no = i;
1091 		asoc->strmout[i].last_msg_incomplete = 0;
1092 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1093 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1094 	}
1095 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1096 
1097 	/* Now the mapping array */
1098 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1099 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1100 	    SCTP_M_MAP);
1101 	if (asoc->mapping_array == NULL) {
1102 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1103 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1104 		return (ENOMEM);
1105 	}
1106 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1107 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1108 	    SCTP_M_MAP);
1109 	if (asoc->nr_mapping_array == NULL) {
1110 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1111 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1112 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1113 		return (ENOMEM);
1114 	}
1115 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1116 
1117 	/* Now the init of the other outqueues */
1118 	TAILQ_INIT(&asoc->free_chunks);
1119 	TAILQ_INIT(&asoc->control_send_queue);
1120 	TAILQ_INIT(&asoc->asconf_send_queue);
1121 	TAILQ_INIT(&asoc->send_queue);
1122 	TAILQ_INIT(&asoc->sent_queue);
1123 	TAILQ_INIT(&asoc->reasmqueue);
1124 	TAILQ_INIT(&asoc->resetHead);
1125 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1126 	TAILQ_INIT(&asoc->asconf_queue);
1127 	/* authentication fields */
1128 	asoc->authinfo.random = NULL;
1129 	asoc->authinfo.active_keyid = 0;
1130 	asoc->authinfo.assoc_key = NULL;
1131 	asoc->authinfo.assoc_keyid = 0;
1132 	asoc->authinfo.recv_key = NULL;
1133 	asoc->authinfo.recv_keyid = 0;
1134 	LIST_INIT(&asoc->shared_keys);
1135 	asoc->marked_retrans = 0;
1136 	asoc->port = inp->sctp_ep.port;
1137 	asoc->timoinit = 0;
1138 	asoc->timodata = 0;
1139 	asoc->timosack = 0;
1140 	asoc->timoshutdown = 0;
1141 	asoc->timoheartbeat = 0;
1142 	asoc->timocookie = 0;
1143 	asoc->timoshutdownack = 0;
1144 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1145 	asoc->discontinuity_time = asoc->start_time;
1146 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1147 		asoc->abandoned_unsent[i] = 0;
1148 		asoc->abandoned_sent[i] = 0;
1149 	}
1150 	/*
1151 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1152 	 * freed later when the association is freed.
1153 	 */
1154 	return (0);
1155 }
1156 
1157 void
1158 sctp_print_mapping_array(struct sctp_association *asoc)
1159 {
1160 	unsigned int i, limit;
1161 
1162 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1163 	    asoc->mapping_array_size,
1164 	    asoc->mapping_array_base_tsn,
1165 	    asoc->cumulative_tsn,
1166 	    asoc->highest_tsn_inside_map,
1167 	    asoc->highest_tsn_inside_nr_map);
1168 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1169 		if (asoc->mapping_array[limit - 1] != 0) {
1170 			break;
1171 		}
1172 	}
1173 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1174 	for (i = 0; i < limit; i++) {
1175 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1176 	}
1177 	if (limit % 16)
1178 		SCTP_PRINTF("\n");
1179 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1180 		if (asoc->nr_mapping_array[limit - 1]) {
1181 			break;
1182 		}
1183 	}
1184 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1185 	for (i = 0; i < limit; i++) {
1186 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1187 	}
1188 	if (limit % 16)
1189 		SCTP_PRINTF("\n");
1190 }
1191 
1192 int
1193 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1194 {
1195 	/* mapping array needs to grow */
1196 	uint8_t *new_array1, *new_array2;
1197 	uint32_t new_size;
1198 
1199 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1200 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1201 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1202 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1203 		/* can't get more, forget it */
1204 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1205 		if (new_array1) {
1206 			SCTP_FREE(new_array1, SCTP_M_MAP);
1207 		}
1208 		if (new_array2) {
1209 			SCTP_FREE(new_array2, SCTP_M_MAP);
1210 		}
1211 		return (-1);
1212 	}
1213 	memset(new_array1, 0, new_size);
1214 	memset(new_array2, 0, new_size);
1215 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1216 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1217 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1218 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1219 	asoc->mapping_array = new_array1;
1220 	asoc->nr_mapping_array = new_array2;
1221 	asoc->mapping_array_size = new_size;
1222 	return (0);
1223 }
1224 
1225 
/*
 * Walk every endpoint/association selected by the iterator "it" and run
 * its callbacks (function_inp per endpoint, function_assoc per
 * association, function_inp_end after an endpoint's last association,
 * function_atend when the whole walk is complete).  Runs with the
 * INP-INFO read lock and the global iterator lock held, periodically
 * dropping both so other threads can make progress.  Frees "it" before
 * returning.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* it->inp is already read-locked on the first pass through here */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Pin the stcb (refcnt) and inp (ref) so they
			 * survive while every lock is dropped below.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/* While unlocked, someone may have asked us to stop */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-take the locks and drop the temporary holds */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1373 
1374 void
1375 sctp_iterator_worker(void)
1376 {
1377 	struct sctp_iterator *it, *nit;
1378 
1379 	/* This function is called with the WQ lock in place */
1380 
1381 	sctp_it_ctl.iterator_running = 1;
1382 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1383 		sctp_it_ctl.cur_it = it;
1384 		/* now lets work on this one */
1385 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1386 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1387 		CURVNET_SET(it->vn);
1388 		sctp_iterator_work(it);
1389 		sctp_it_ctl.cur_it = NULL;
1390 		CURVNET_RESTORE();
1391 		SCTP_IPI_ITERATOR_WQ_LOCK();
1392 		/* sa_ignore FREED_MEMORY */
1393 	}
1394 	sctp_it_ctl.iterator_running = 0;
1395 	return;
1396 }
1397 
1398 
1399 static void
1400 sctp_handle_addr_wq(void)
1401 {
1402 	/* deal with the ADDR wq from the rtsock calls */
1403 	struct sctp_laddr *wi, *nwi;
1404 	struct sctp_asconf_iterator *asc;
1405 
1406 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1407 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1408 	if (asc == NULL) {
1409 		/* Try later, no memory */
1410 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1411 		    (struct sctp_inpcb *)NULL,
1412 		    (struct sctp_tcb *)NULL,
1413 		    (struct sctp_nets *)NULL);
1414 		return;
1415 	}
1416 	LIST_INIT(&asc->list_of_work);
1417 	asc->cnt = 0;
1418 
1419 	SCTP_WQ_ADDR_LOCK();
1420 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1421 		LIST_REMOVE(wi, sctp_nxt_addr);
1422 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1423 		asc->cnt++;
1424 	}
1425 	SCTP_WQ_ADDR_UNLOCK();
1426 
1427 	if (asc->cnt == 0) {
1428 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1429 	} else {
1430 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1431 		    sctp_asconf_iterator_stcb,
1432 		    NULL,	/* No ep end for boundall */
1433 		    SCTP_PCB_FLAGS_BOUNDALL,
1434 		    SCTP_PCB_ANY_FEATURES,
1435 		    SCTP_ASOC_ANY_STATE,
1436 		    (void *)asc, 0,
1437 		    sctp_asconf_iterator_end, NULL, 0);
1438 	}
1439 }
1440 
/*
 * Common callout handler for every SCTP timer type.  Recovers the
 * endpoint (inp), association (stcb) and destination (net) from the
 * sctp_timer cookie, runs a series of staleness/validity checks
 * (progress is recorded in tmr->stopped_from as a debugging aid), then
 * dispatches to the per-type timer routine.  The inp reference and stcb
 * refcount taken here are released at out_decr; handlers that free the
 * stcb or inp themselves jump to out_no_decr instead.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* every timer type except ADDR_WQ needs a valid endpoint */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (inp) {
		/*
		 * Hold a reference on the endpoint.  A socket-less
		 * (closing) inp only gets to service the shutdown-path
		 * timer types listed below.
		 */
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* pin the association so it cannot go away under us */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/*
		 * Take the TCB lock; with it held the temporary refcnt
		 * can be dropped again.  ASOCKILL is allowed to proceed
		 * even on an association about to be freed.
		 */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed-SACK timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/* cookie-echo retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* rotate the endpoint's cookie secret keys */
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* guard expired: abort the whole association */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
	/* common exit paths: drop locks/references taken above */
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    tmr->type);
	CURVNET_RESTORE();
}
1882 
1883 void
1884 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1885     struct sctp_nets *net)
1886 {
1887 	uint32_t to_ticks;
1888 	struct sctp_timer *tmr;
1889 
1890 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1891 		return;
1892 
1893 	tmr = NULL;
1894 	if (stcb) {
1895 		SCTP_TCB_LOCK_ASSERT(stcb);
1896 	}
1897 	switch (t_type) {
1898 	case SCTP_TIMER_TYPE_ZERO_COPY:
1899 		tmr = &inp->sctp_ep.zero_copy_timer;
1900 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1901 		break;
1902 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1903 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1904 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1905 		break;
1906 	case SCTP_TIMER_TYPE_ADDR_WQ:
1907 		/* Only 1 tick away :-) */
1908 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1909 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1910 		break;
1911 	case SCTP_TIMER_TYPE_SEND:
1912 		/* Here we use the RTO timer */
1913 		{
1914 			int rto_val;
1915 
1916 			if ((stcb == NULL) || (net == NULL)) {
1917 				return;
1918 			}
1919 			tmr = &net->rxt_timer;
1920 			if (net->RTO == 0) {
1921 				rto_val = stcb->asoc.initial_rto;
1922 			} else {
1923 				rto_val = net->RTO;
1924 			}
1925 			to_ticks = MSEC_TO_TICKS(rto_val);
1926 		}
1927 		break;
1928 	case SCTP_TIMER_TYPE_INIT:
1929 		/*
1930 		 * Here we use the INIT timer default usually about 1
1931 		 * minute.
1932 		 */
1933 		if ((stcb == NULL) || (net == NULL)) {
1934 			return;
1935 		}
1936 		tmr = &net->rxt_timer;
1937 		if (net->RTO == 0) {
1938 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1939 		} else {
1940 			to_ticks = MSEC_TO_TICKS(net->RTO);
1941 		}
1942 		break;
1943 	case SCTP_TIMER_TYPE_RECV:
1944 		/*
1945 		 * Here we use the Delayed-Ack timer value from the inp
1946 		 * ususually about 200ms.
1947 		 */
1948 		if (stcb == NULL) {
1949 			return;
1950 		}
1951 		tmr = &stcb->asoc.dack_timer;
1952 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1953 		break;
1954 	case SCTP_TIMER_TYPE_SHUTDOWN:
1955 		/* Here we use the RTO of the destination. */
1956 		if ((stcb == NULL) || (net == NULL)) {
1957 			return;
1958 		}
1959 		if (net->RTO == 0) {
1960 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1961 		} else {
1962 			to_ticks = MSEC_TO_TICKS(net->RTO);
1963 		}
1964 		tmr = &net->rxt_timer;
1965 		break;
1966 	case SCTP_TIMER_TYPE_HEARTBEAT:
1967 		/*
1968 		 * the net is used here so that we can add in the RTO. Even
1969 		 * though we use a different timer. We also add the HB timer
1970 		 * PLUS a random jitter.
1971 		 */
1972 		if ((stcb == NULL) || (net == NULL)) {
1973 			return;
1974 		} else {
1975 			uint32_t rndval;
1976 			uint32_t jitter;
1977 
1978 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1979 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1980 				return;
1981 			}
1982 			if (net->RTO == 0) {
1983 				to_ticks = stcb->asoc.initial_rto;
1984 			} else {
1985 				to_ticks = net->RTO;
1986 			}
1987 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1988 			jitter = rndval % to_ticks;
1989 			if (jitter >= (to_ticks >> 1)) {
1990 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1991 			} else {
1992 				to_ticks = to_ticks - jitter;
1993 			}
1994 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1995 			    !(net->dest_state & SCTP_ADDR_PF)) {
1996 				to_ticks += net->heart_beat_delay;
1997 			}
1998 			/*
1999 			 * Now we must convert the to_ticks that are now in
2000 			 * ms to ticks.
2001 			 */
2002 			to_ticks = MSEC_TO_TICKS(to_ticks);
2003 			tmr = &net->hb_timer;
2004 		}
2005 		break;
2006 	case SCTP_TIMER_TYPE_COOKIE:
2007 		/*
2008 		 * Here we can use the RTO timer from the network since one
2009 		 * RTT was compelete. If a retran happened then we will be
2010 		 * using the RTO initial value.
2011 		 */
2012 		if ((stcb == NULL) || (net == NULL)) {
2013 			return;
2014 		}
2015 		if (net->RTO == 0) {
2016 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2017 		} else {
2018 			to_ticks = MSEC_TO_TICKS(net->RTO);
2019 		}
2020 		tmr = &net->rxt_timer;
2021 		break;
2022 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2023 		/*
2024 		 * nothing needed but the endpoint here ususually about 60
2025 		 * minutes.
2026 		 */
2027 		tmr = &inp->sctp_ep.signature_change;
2028 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2029 		break;
2030 	case SCTP_TIMER_TYPE_ASOCKILL:
2031 		if (stcb == NULL) {
2032 			return;
2033 		}
2034 		tmr = &stcb->asoc.strreset_timer;
2035 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2036 		break;
2037 	case SCTP_TIMER_TYPE_INPKILL:
2038 		/*
2039 		 * The inp is setup to die. We re-use the signature_chage
2040 		 * timer since that has stopped and we are in the GONE
2041 		 * state.
2042 		 */
2043 		tmr = &inp->sctp_ep.signature_change;
2044 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2045 		break;
2046 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2047 		/*
2048 		 * Here we use the value found in the EP for PMTU ususually
2049 		 * about 10 minutes.
2050 		 */
2051 		if ((stcb == NULL) || (net == NULL)) {
2052 			return;
2053 		}
2054 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2055 			return;
2056 		}
2057 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2058 		tmr = &net->pmtu_timer;
2059 		break;
2060 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2061 		/* Here we use the RTO of the destination */
2062 		if ((stcb == NULL) || (net == NULL)) {
2063 			return;
2064 		}
2065 		if (net->RTO == 0) {
2066 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2067 		} else {
2068 			to_ticks = MSEC_TO_TICKS(net->RTO);
2069 		}
2070 		tmr = &net->rxt_timer;
2071 		break;
2072 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2073 		/*
2074 		 * Here we use the endpoints shutdown guard timer usually
2075 		 * about 3 minutes.
2076 		 */
2077 		if (stcb == NULL) {
2078 			return;
2079 		}
2080 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2081 		tmr = &stcb->asoc.shut_guard_timer;
2082 		break;
2083 	case SCTP_TIMER_TYPE_STRRESET:
2084 		/*
2085 		 * Here the timer comes from the stcb but its value is from
2086 		 * the net's RTO.
2087 		 */
2088 		if ((stcb == NULL) || (net == NULL)) {
2089 			return;
2090 		}
2091 		if (net->RTO == 0) {
2092 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2093 		} else {
2094 			to_ticks = MSEC_TO_TICKS(net->RTO);
2095 		}
2096 		tmr = &stcb->asoc.strreset_timer;
2097 		break;
2098 	case SCTP_TIMER_TYPE_ASCONF:
2099 		/*
2100 		 * Here the timer comes from the stcb but its value is from
2101 		 * the net's RTO.
2102 		 */
2103 		if ((stcb == NULL) || (net == NULL)) {
2104 			return;
2105 		}
2106 		if (net->RTO == 0) {
2107 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2108 		} else {
2109 			to_ticks = MSEC_TO_TICKS(net->RTO);
2110 		}
2111 		tmr = &stcb->asoc.asconf_timer;
2112 		break;
2113 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2114 		if ((stcb == NULL) || (net != NULL)) {
2115 			return;
2116 		}
2117 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2118 		tmr = &stcb->asoc.delete_prim_timer;
2119 		break;
2120 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2121 		if (stcb == NULL) {
2122 			return;
2123 		}
2124 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2125 			/*
2126 			 * Really an error since stcb is NOT set to
2127 			 * autoclose
2128 			 */
2129 			return;
2130 		}
2131 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2132 		tmr = &stcb->asoc.autoclose_timer;
2133 		break;
2134 	default:
2135 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2136 		    __FUNCTION__, t_type);
2137 		return;
2138 		break;
2139 	}
2140 	if ((to_ticks <= 0) || (tmr == NULL)) {
2141 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2142 		    __FUNCTION__, t_type, to_ticks, (void *)tmr);
2143 		return;
2144 	}
2145 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2146 		/*
2147 		 * we do NOT allow you to have it already running. if it is
2148 		 * we leave the current one up unchanged
2149 		 */
2150 		return;
2151 	}
2152 	/* At this point we can proceed */
2153 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2154 		stcb->asoc.num_send_timers_up++;
2155 	}
2156 	tmr->stopped_from = 0;
2157 	tmr->type = t_type;
2158 	tmr->ep = (void *)inp;
2159 	tmr->tcb = (void *)stcb;
2160 	tmr->net = (void *)net;
2161 	tmr->self = (void *)tmr;
2162 	tmr->vnet = (void *)curvnet;
2163 	tmr->ticks = sctp_get_tick_count();
2164 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2165 	return;
2166 }
2167 
2168 void
2169 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2170     struct sctp_nets *net, uint32_t from)
2171 {
2172 	struct sctp_timer *tmr;
2173 
2174 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2175 	    (inp == NULL))
2176 		return;
2177 
2178 	tmr = NULL;
2179 	if (stcb) {
2180 		SCTP_TCB_LOCK_ASSERT(stcb);
2181 	}
2182 	switch (t_type) {
2183 	case SCTP_TIMER_TYPE_ZERO_COPY:
2184 		tmr = &inp->sctp_ep.zero_copy_timer;
2185 		break;
2186 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2187 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2188 		break;
2189 	case SCTP_TIMER_TYPE_ADDR_WQ:
2190 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2191 		break;
2192 	case SCTP_TIMER_TYPE_SEND:
2193 		if ((stcb == NULL) || (net == NULL)) {
2194 			return;
2195 		}
2196 		tmr = &net->rxt_timer;
2197 		break;
2198 	case SCTP_TIMER_TYPE_INIT:
2199 		if ((stcb == NULL) || (net == NULL)) {
2200 			return;
2201 		}
2202 		tmr = &net->rxt_timer;
2203 		break;
2204 	case SCTP_TIMER_TYPE_RECV:
2205 		if (stcb == NULL) {
2206 			return;
2207 		}
2208 		tmr = &stcb->asoc.dack_timer;
2209 		break;
2210 	case SCTP_TIMER_TYPE_SHUTDOWN:
2211 		if ((stcb == NULL) || (net == NULL)) {
2212 			return;
2213 		}
2214 		tmr = &net->rxt_timer;
2215 		break;
2216 	case SCTP_TIMER_TYPE_HEARTBEAT:
2217 		if ((stcb == NULL) || (net == NULL)) {
2218 			return;
2219 		}
2220 		tmr = &net->hb_timer;
2221 		break;
2222 	case SCTP_TIMER_TYPE_COOKIE:
2223 		if ((stcb == NULL) || (net == NULL)) {
2224 			return;
2225 		}
2226 		tmr = &net->rxt_timer;
2227 		break;
2228 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2229 		/* nothing needed but the endpoint here */
2230 		tmr = &inp->sctp_ep.signature_change;
2231 		/*
2232 		 * We re-use the newcookie timer for the INP kill timer. We
2233 		 * must assure that we do not kill it by accident.
2234 		 */
2235 		break;
2236 	case SCTP_TIMER_TYPE_ASOCKILL:
2237 		/*
2238 		 * Stop the asoc kill timer.
2239 		 */
2240 		if (stcb == NULL) {
2241 			return;
2242 		}
2243 		tmr = &stcb->asoc.strreset_timer;
2244 		break;
2245 
2246 	case SCTP_TIMER_TYPE_INPKILL:
2247 		/*
2248 		 * The inp is setup to die. We re-use the signature_chage
2249 		 * timer since that has stopped and we are in the GONE
2250 		 * state.
2251 		 */
2252 		tmr = &inp->sctp_ep.signature_change;
2253 		break;
2254 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2255 		if ((stcb == NULL) || (net == NULL)) {
2256 			return;
2257 		}
2258 		tmr = &net->pmtu_timer;
2259 		break;
2260 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2261 		if ((stcb == NULL) || (net == NULL)) {
2262 			return;
2263 		}
2264 		tmr = &net->rxt_timer;
2265 		break;
2266 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2267 		if (stcb == NULL) {
2268 			return;
2269 		}
2270 		tmr = &stcb->asoc.shut_guard_timer;
2271 		break;
2272 	case SCTP_TIMER_TYPE_STRRESET:
2273 		if (stcb == NULL) {
2274 			return;
2275 		}
2276 		tmr = &stcb->asoc.strreset_timer;
2277 		break;
2278 	case SCTP_TIMER_TYPE_ASCONF:
2279 		if (stcb == NULL) {
2280 			return;
2281 		}
2282 		tmr = &stcb->asoc.asconf_timer;
2283 		break;
2284 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2285 		if (stcb == NULL) {
2286 			return;
2287 		}
2288 		tmr = &stcb->asoc.delete_prim_timer;
2289 		break;
2290 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2291 		if (stcb == NULL) {
2292 			return;
2293 		}
2294 		tmr = &stcb->asoc.autoclose_timer;
2295 		break;
2296 	default:
2297 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2298 		    __FUNCTION__, t_type);
2299 		break;
2300 	}
2301 	if (tmr == NULL) {
2302 		return;
2303 	}
2304 	if ((tmr->type != t_type) && tmr->type) {
2305 		/*
2306 		 * Ok we have a timer that is under joint use. Cookie timer
2307 		 * per chance with the SEND timer. We therefore are NOT
2308 		 * running the timer that the caller wants stopped.  So just
2309 		 * return.
2310 		 */
2311 		return;
2312 	}
2313 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2314 		stcb->asoc.num_send_timers_up--;
2315 		if (stcb->asoc.num_send_timers_up < 0) {
2316 			stcb->asoc.num_send_timers_up = 0;
2317 		}
2318 	}
2319 	tmr->self = NULL;
2320 	tmr->stopped_from = from;
2321 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2322 	return;
2323 }
2324 
2325 uint32_t
2326 sctp_calculate_len(struct mbuf *m)
2327 {
2328 	uint32_t tlen = 0;
2329 	struct mbuf *at;
2330 
2331 	at = m;
2332 	while (at) {
2333 		tlen += SCTP_BUF_LEN(at);
2334 		at = SCTP_BUF_NEXT(at);
2335 	}
2336 	return (tlen);
2337 }
2338 
2339 void
2340 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2341     struct sctp_association *asoc, uint32_t mtu)
2342 {
2343 	/*
2344 	 * Reset the P-MTU size on this association, this involves changing
2345 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2346 	 * allow the DF flag to be cleared.
2347 	 */
2348 	struct sctp_tmit_chunk *chk;
2349 	unsigned int eff_mtu, ovh;
2350 
2351 	asoc->smallest_mtu = mtu;
2352 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2353 		ovh = SCTP_MIN_OVERHEAD;
2354 	} else {
2355 		ovh = SCTP_MIN_V4_OVERHEAD;
2356 	}
2357 	eff_mtu = mtu - ovh;
2358 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2359 		if (chk->send_size > eff_mtu) {
2360 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2361 		}
2362 	}
2363 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2364 		if (chk->send_size > eff_mtu) {
2365 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2366 		}
2367 	}
2368 }
2369 
2370 
2371 /*
2372  * given an association and starting time of the current RTT period return
2373  * RTO in number of msecs net should point to the current network
2374  */
2375 
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe, int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 * Side effects: updates net->rtt (us), net->lastsa/lastsv (scaled
	 * SRTT/RTTVAR), net->lan_type, and the association's satellite-
	 * network flags.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;	/* set on the first measurement for this net */
	struct timeval now, then, *old;

	/* Copy it out for sparc64 (strict-alignment platforms cannot
	 * safely read a potentially misaligned timeval in place) */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error: caller passed an unknown 'safe' mode */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* now = elapsed time since the RTT period started */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
	        (uint64_t) now.tv_usec;

	/* compute rtt in ms */
	rtt = (int32_t) (net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* rtt becomes the error term: measured RTT minus srtt */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		/* |error| minus current rttvar drives the variance update */
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurement: seed srtt with rtt, rttvar with rtt/2 */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	/* never allow a zero variance: floor it at the clock granularity */
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	/*
	 * RTO = srtt + scaled rttvar.  NOTE(review): lastsv is kept scaled
	 * here; assuming SCTP_RTT_VAR_SHIFT == 2 this matches the usual
	 * srtt + 4*rttvar formula from RFC 4960 -- confirm against the
	 * header definitions.
	 */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		/* very large RTO: treat the path as a satellite network */
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		/* RTO dropped again; permanently latch out of satellite mode */
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2491 
2492 /*
2493  * return a pointer to a contiguous piece of data from the given mbuf chain
2494  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2495  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2496  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2497  */
2498 caddr_t
2499 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2500 {
2501 	uint32_t count;
2502 	uint8_t *ptr;
2503 
2504 	ptr = in_ptr;
2505 	if ((off < 0) || (len <= 0))
2506 		return (NULL);
2507 
2508 	/* find the desired start location */
2509 	while ((m != NULL) && (off > 0)) {
2510 		if (off < SCTP_BUF_LEN(m))
2511 			break;
2512 		off -= SCTP_BUF_LEN(m);
2513 		m = SCTP_BUF_NEXT(m);
2514 	}
2515 	if (m == NULL)
2516 		return (NULL);
2517 
2518 	/* is the current mbuf large enough (eg. contiguous)? */
2519 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2520 		return (mtod(m, caddr_t)+off);
2521 	} else {
2522 		/* else, it spans more than one mbuf, so save a temp copy... */
2523 		while ((m != NULL) && (len > 0)) {
2524 			count = min(SCTP_BUF_LEN(m) - off, len);
2525 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2526 			len -= count;
2527 			ptr += count;
2528 			off = 0;
2529 			m = SCTP_BUF_NEXT(m);
2530 		}
2531 		if ((m == NULL) && (len > 0))
2532 			return (NULL);
2533 		else
2534 			return ((caddr_t)in_ptr);
2535 	}
2536 }
2537 
2538 
2539 
2540 struct sctp_paramhdr *
2541 sctp_get_next_param(struct mbuf *m,
2542     int offset,
2543     struct sctp_paramhdr *pull,
2544     int pull_limit)
2545 {
2546 	/* This just provides a typed signature to Peter's Pull routine */
2547 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2548 	    (uint8_t *) pull));
2549 }
2550 
2551 
2552 struct mbuf *
2553 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2554 {
2555 	struct mbuf *m_last;
2556 	caddr_t dp;
2557 
2558 	if (padlen > 3) {
2559 		return (NULL);
2560 	}
2561 	if (padlen <= M_TRAILINGSPACE(m)) {
2562 		/*
2563 		 * The easy way. We hope the majority of the time we hit
2564 		 * here :)
2565 		 */
2566 		m_last = m;
2567 	} else {
2568 		/* Hard way we must grow the mbuf chain */
2569 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2570 		if (m_last == NULL) {
2571 			return (NULL);
2572 		}
2573 		SCTP_BUF_LEN(m_last) = 0;
2574 		SCTP_BUF_NEXT(m_last) = NULL;
2575 		SCTP_BUF_NEXT(m) = m_last;
2576 	}
2577 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2578 	SCTP_BUF_LEN(m_last) += padlen;
2579 	memset(dp, 0, padlen);
2580 	return (m_last);
2581 }
2582 
2583 struct mbuf *
2584 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2585 {
2586 	/* find the last mbuf in chain and pad it */
2587 	struct mbuf *m_at;
2588 
2589 	if (last_mbuf != NULL) {
2590 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2591 	} else {
2592 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2593 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2594 				return (sctp_add_pad_tombuf(m_at, padval));
2595 			}
2596 		}
2597 	}
2598 	return (NULL);
2599 }
2600 
/*
 * Queue an SCTP_ASSOC_CHANGE notification on the socket's receive queue
 * (if the user enabled that event) and, for 1-to-1 style sockets on
 * COMM_LOST/CANT_STR_ASSOC, set so_error and wake any sleepers.
 * 'abort' (may be NULL) is the received/sent ABORT chunk to report;
 * 'from_peer' selects peer-initiated vs local error codes.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	size_t notif_len, abort_len;
	unsigned int i;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
		} else {
			abort_len = 0;
		}
		/* size the notification for the optional trailing info */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/*
			 * Retry with smaller value (base notification only,
			 * dropping the optional sac_info payload).
			 */
			notif_len = sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				/* cannot notify; still update so_error below */
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		/* only fill sac_info when the full-size mbuf was obtained */
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				/* one feature byte per supported extension */
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				/* attach the raw ABORT chunk for the user */
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			/* not that we need this */
			control->tail_mbuf = m_notify;
			control->spec_flags = M_NOTIFICATION;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			/* no readq entry: drop the notification */
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			/* peer aborted: refused during setup, reset after */
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			/* local abort: timed out during setup, aborted after */
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* hold a ref across the lock dance so the stcb stays valid */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore_locked(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2748 
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification for address 'sa' with the
 * given state/error on the socket's receive queue, if the user enabled
 * the SCTP_PCB_FLAGS_RECVPADDREVNT event.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no mbuf available: silently drop the notification */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* copy the address into spc_aaddr in the form the user expects */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		/* v6 socket wanting mapped v4 addresses gets ::ffff:a.b.c.d */
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2840 
2841 
2842 static void
2843 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2844     struct sctp_tmit_chunk *chk, int so_locked
2845 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2846     SCTP_UNUSED
2847 #endif
2848 )
2849 {
2850 	struct mbuf *m_notify;
2851 	struct sctp_send_failed *ssf;
2852 	struct sctp_send_failed_event *ssfe;
2853 	struct sctp_queued_to_read *control;
2854 	int length;
2855 
2856 	if ((stcb == NULL) ||
2857 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2858 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2859 		/* event not enabled */
2860 		return;
2861 	}
2862 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2863 		length = sizeof(struct sctp_send_failed_event);
2864 	} else {
2865 		length = sizeof(struct sctp_send_failed);
2866 	}
2867 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
2868 	if (m_notify == NULL)
2869 		/* no space left */
2870 		return;
2871 	SCTP_BUF_LEN(m_notify) = 0;
2872 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2873 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2874 		memset(ssfe, 0, length);
2875 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2876 		if (sent) {
2877 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2878 		} else {
2879 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2880 		}
2881 		length += chk->send_size;
2882 		length -= sizeof(struct sctp_data_chunk);
2883 		ssfe->ssfe_length = length;
2884 		ssfe->ssfe_error = error;
2885 		/* not exactly what the user sent in, but should be close :) */
2886 		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
2887 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2888 		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
2889 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2890 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2891 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2892 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
2893 	} else {
2894 		ssf = mtod(m_notify, struct sctp_send_failed *);
2895 		memset(ssf, 0, length);
2896 		ssf->ssf_type = SCTP_SEND_FAILED;
2897 		if (sent) {
2898 			ssf->ssf_flags = SCTP_DATA_SENT;
2899 		} else {
2900 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2901 		}
2902 		length += chk->send_size;
2903 		length -= sizeof(struct sctp_data_chunk);
2904 		ssf->ssf_length = length;
2905 		ssf->ssf_error = error;
2906 		/* not exactly what the user sent in, but should be close :) */
2907 		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
2908 		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2909 		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2910 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2911 		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2912 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
2913 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2914 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
2915 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2916 	}
2917 	if (chk->data) {
2918 		/*
2919 		 * trim off the sctp chunk header(it should be there)
2920 		 */
2921 		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
2922 			m_adj(chk->data, sizeof(struct sctp_data_chunk));
2923 			sctp_mbuf_crush(chk->data);
2924 			chk->send_size -= sizeof(struct sctp_data_chunk);
2925 		}
2926 	}
2927 	SCTP_BUF_NEXT(m_notify) = chk->data;
2928 	/* Steal off the mbuf */
2929 	chk->data = NULL;
2930 	/*
2931 	 * For this case, we check the actual socket buffer, since the assoc
2932 	 * is going away we don't want to overfill the socket buffer for a
2933 	 * non-reader
2934 	 */
2935 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
2936 		sctp_m_freem(m_notify);
2937 		return;
2938 	}
2939 	/* append to socket */
2940 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2941 	    0, 0, stcb->asoc.context, 0, 0, 0,
2942 	    m_notify);
2943 	if (control == NULL) {
2944 		/* no memory */
2945 		sctp_m_freem(m_notify);
2946 		return;
2947 	}
2948 	control->spec_flags = M_NOTIFICATION;
2949 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2950 	    control,
2951 	    &stcb->sctp_socket->so_rcv, 1,
2952 	    SCTP_READ_LOCK_NOT_HELD,
2953 	    so_locked);
2954 }
2955 
2956 
/*
 * Build and queue an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification
 * for a message that never left the stream output queue (still held as a
 * sctp_stream_queue_pending), so it is always flagged SCTP_DATA_UNSENT.
 *
 * Which of the two wire formats is used depends on which socket option the
 * application enabled: the newer RECVNSENDFAILEVNT (sctp_send_failed_event)
 * takes precedence over the older RECVSENDFAILEVNT (sctp_send_failed).
 * No-op if neither event is enabled or stcb is NULL.
 *
 * The pending data mbuf chain is stolen from 'sp' (sp->data is NULLed), so
 * the caller must not free it afterwards.  The notification is dropped
 * silently if no mbuf can be allocated, the socket receive buffer has no
 * room, or no read-queue entry can be built.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* New-style sctp_send_failed_event notification. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		/* ssfe_length includes the user data that follows the header. */
		length += sp->length;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->stream;
		if (sp->some_taken) {
			/* Part of the message was already sent. */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		/* Deprecated sctp_send_failed notification. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		/* ssf_length includes the user data that follows the header. */
		length += sp->length;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3057 
3058 
3059 
3060 static void
3061 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3062 {
3063 	struct mbuf *m_notify;
3064 	struct sctp_adaptation_event *sai;
3065 	struct sctp_queued_to_read *control;
3066 
3067 	if ((stcb == NULL) ||
3068 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3069 		/* event not enabled */
3070 		return;
3071 	}
3072 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3073 	if (m_notify == NULL)
3074 		/* no space left */
3075 		return;
3076 	SCTP_BUF_LEN(m_notify) = 0;
3077 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3078 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3079 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3080 	sai->sai_flags = 0;
3081 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3082 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3083 	sai->sai_assoc_id = sctp_get_associd(stcb);
3084 
3085 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3086 	SCTP_BUF_NEXT(m_notify) = NULL;
3087 
3088 	/* append to socket */
3089 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3090 	    0, 0, stcb->asoc.context, 0, 0, 0,
3091 	    m_notify);
3092 	if (control == NULL) {
3093 		/* no memory */
3094 		sctp_m_freem(m_notify);
3095 		return;
3096 	}
3097 	control->length = SCTP_BUF_LEN(m_notify);
3098 	control->spec_flags = M_NOTIFICATION;
3099 	/* not that we need this */
3100 	control->tail_mbuf = m_notify;
3101 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3102 	    control,
3103 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3104 }
3105 
3106 /* This always must be called with the read-queue LOCKED in the INP */
3107 static void
3108 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3109     uint32_t val, int so_locked
3110 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3111     SCTP_UNUSED
3112 #endif
3113 )
3114 {
3115 	struct mbuf *m_notify;
3116 	struct sctp_pdapi_event *pdapi;
3117 	struct sctp_queued_to_read *control;
3118 	struct sockbuf *sb;
3119 
3120 	if ((stcb == NULL) ||
3121 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3122 		/* event not enabled */
3123 		return;
3124 	}
3125 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3126 		return;
3127 	}
3128 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3129 	if (m_notify == NULL)
3130 		/* no space left */
3131 		return;
3132 	SCTP_BUF_LEN(m_notify) = 0;
3133 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3134 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3135 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3136 	pdapi->pdapi_flags = 0;
3137 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3138 	pdapi->pdapi_indication = error;
3139 	pdapi->pdapi_stream = (val >> 16);
3140 	pdapi->pdapi_seq = (val & 0x0000ffff);
3141 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3142 
3143 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3144 	SCTP_BUF_NEXT(m_notify) = NULL;
3145 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3146 	    0, 0, stcb->asoc.context, 0, 0, 0,
3147 	    m_notify);
3148 	if (control == NULL) {
3149 		/* no memory */
3150 		sctp_m_freem(m_notify);
3151 		return;
3152 	}
3153 	control->spec_flags = M_NOTIFICATION;
3154 	control->length = SCTP_BUF_LEN(m_notify);
3155 	/* not that we need this */
3156 	control->tail_mbuf = m_notify;
3157 	control->held_length = 0;
3158 	control->length = 0;
3159 	sb = &stcb->sctp_socket->so_rcv;
3160 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3161 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3162 	}
3163 	sctp_sballoc(stcb, sb, m_notify);
3164 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3165 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3166 	}
3167 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3168 	control->end_added = 1;
3169 	if (stcb->asoc.control_pdapi)
3170 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3171 	else {
3172 		/* we really should not see this case */
3173 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3174 	}
3175 	if (stcb->sctp_ep && stcb->sctp_socket) {
3176 		/* This should always be the case */
3177 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3178 		struct socket *so;
3179 
3180 		so = SCTP_INP_SO(stcb->sctp_ep);
3181 		if (!so_locked) {
3182 			atomic_add_int(&stcb->asoc.refcnt, 1);
3183 			SCTP_TCB_UNLOCK(stcb);
3184 			SCTP_SOCKET_LOCK(so, 1);
3185 			SCTP_TCB_LOCK(stcb);
3186 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3187 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3188 				SCTP_SOCKET_UNLOCK(so, 1);
3189 				return;
3190 			}
3191 		}
3192 #endif
3193 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3194 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3195 		if (!so_locked) {
3196 			SCTP_SOCKET_UNLOCK(so, 1);
3197 		}
3198 #endif
3199 	}
3200 }
3201 
/*
 * Deliver an SCTP_SHUTDOWN_EVENT notification.  For 1-to-1 style (TCP
 * model) sockets and connected 1-to-many sockets this additionally marks
 * the socket as unable to send (socantsendmore) so the application sees
 * the shutdown through normal socket semantics.  The notification itself
 * is only queued when SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT is enabled; it is
 * dropped silently on allocation failure.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Must take the socket lock; drop the TCB lock around it
		 * while holding a refcount so the assoc cannot go away.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3270 
3271 static void
3272 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3273     int so_locked
3274 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3275     SCTP_UNUSED
3276 #endif
3277 )
3278 {
3279 	struct mbuf *m_notify;
3280 	struct sctp_sender_dry_event *event;
3281 	struct sctp_queued_to_read *control;
3282 
3283 	if ((stcb == NULL) ||
3284 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3285 		/* event not enabled */
3286 		return;
3287 	}
3288 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3289 	if (m_notify == NULL) {
3290 		/* no space left */
3291 		return;
3292 	}
3293 	SCTP_BUF_LEN(m_notify) = 0;
3294 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3295 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3296 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3297 	event->sender_dry_flags = 0;
3298 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3299 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3300 
3301 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3302 	SCTP_BUF_NEXT(m_notify) = NULL;
3303 
3304 	/* append to socket */
3305 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3306 	    0, 0, stcb->asoc.context, 0, 0, 0,
3307 	    m_notify);
3308 	if (control == NULL) {
3309 		/* no memory */
3310 		sctp_m_freem(m_notify);
3311 		return;
3312 	}
3313 	control->length = SCTP_BUF_LEN(m_notify);
3314 	control->spec_flags = M_NOTIFICATION;
3315 	/* not that we need this */
3316 	control->tail_mbuf = m_notify;
3317 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3318 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3319 }
3320 
3321 
3322 void
3323 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3324 {
3325 	struct mbuf *m_notify;
3326 	struct sctp_queued_to_read *control;
3327 	struct sctp_stream_change_event *stradd;
3328 
3329 	if ((stcb == NULL) ||
3330 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3331 		/* event not enabled */
3332 		return;
3333 	}
3334 	if ((stcb->asoc.peer_req_out) && flag) {
3335 		/* Peer made the request, don't tell the local user */
3336 		stcb->asoc.peer_req_out = 0;
3337 		return;
3338 	}
3339 	stcb->asoc.peer_req_out = 0;
3340 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3341 	if (m_notify == NULL)
3342 		/* no space left */
3343 		return;
3344 	SCTP_BUF_LEN(m_notify) = 0;
3345 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3346 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3347 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3348 	stradd->strchange_flags = flag;
3349 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3350 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3351 	stradd->strchange_instrms = numberin;
3352 	stradd->strchange_outstrms = numberout;
3353 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3354 	SCTP_BUF_NEXT(m_notify) = NULL;
3355 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3356 		/* no space */
3357 		sctp_m_freem(m_notify);
3358 		return;
3359 	}
3360 	/* append to socket */
3361 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3362 	    0, 0, stcb->asoc.context, 0, 0, 0,
3363 	    m_notify);
3364 	if (control == NULL) {
3365 		/* no memory */
3366 		sctp_m_freem(m_notify);
3367 		return;
3368 	}
3369 	control->spec_flags = M_NOTIFICATION;
3370 	control->length = SCTP_BUF_LEN(m_notify);
3371 	/* not that we need this */
3372 	control->tail_mbuf = m_notify;
3373 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3374 	    control,
3375 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3376 }
3377 
3378 void
3379 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3380 {
3381 	struct mbuf *m_notify;
3382 	struct sctp_queued_to_read *control;
3383 	struct sctp_assoc_reset_event *strasoc;
3384 
3385 	if ((stcb == NULL) ||
3386 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3387 		/* event not enabled */
3388 		return;
3389 	}
3390 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3391 	if (m_notify == NULL)
3392 		/* no space left */
3393 		return;
3394 	SCTP_BUF_LEN(m_notify) = 0;
3395 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3396 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3397 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3398 	strasoc->assocreset_flags = flag;
3399 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3400 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3401 	strasoc->assocreset_local_tsn = sending_tsn;
3402 	strasoc->assocreset_remote_tsn = recv_tsn;
3403 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3404 	SCTP_BUF_NEXT(m_notify) = NULL;
3405 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3406 		/* no space */
3407 		sctp_m_freem(m_notify);
3408 		return;
3409 	}
3410 	/* append to socket */
3411 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3412 	    0, 0, stcb->asoc.context, 0, 0, 0,
3413 	    m_notify);
3414 	if (control == NULL) {
3415 		/* no memory */
3416 		sctp_m_freem(m_notify);
3417 		return;
3418 	}
3419 	control->spec_flags = M_NOTIFICATION;
3420 	control->length = SCTP_BUF_LEN(m_notify);
3421 	/* not that we need this */
3422 	control->tail_mbuf = m_notify;
3423 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3424 	    control,
3425 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3426 }
3427 
3428 
3429 
3430 static void
3431 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3432     int number_entries, uint16_t * list, int flag)
3433 {
3434 	struct mbuf *m_notify;
3435 	struct sctp_queued_to_read *control;
3436 	struct sctp_stream_reset_event *strreset;
3437 	int len;
3438 
3439 	if ((stcb == NULL) ||
3440 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3441 		/* event not enabled */
3442 		return;
3443 	}
3444 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3445 	if (m_notify == NULL)
3446 		/* no space left */
3447 		return;
3448 	SCTP_BUF_LEN(m_notify) = 0;
3449 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3450 	if (len > M_TRAILINGSPACE(m_notify)) {
3451 		/* never enough room */
3452 		sctp_m_freem(m_notify);
3453 		return;
3454 	}
3455 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3456 	memset(strreset, 0, len);
3457 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3458 	strreset->strreset_flags = flag;
3459 	strreset->strreset_length = len;
3460 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3461 	if (number_entries) {
3462 		int i;
3463 
3464 		for (i = 0; i < number_entries; i++) {
3465 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3466 		}
3467 	}
3468 	SCTP_BUF_LEN(m_notify) = len;
3469 	SCTP_BUF_NEXT(m_notify) = NULL;
3470 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3471 		/* no space */
3472 		sctp_m_freem(m_notify);
3473 		return;
3474 	}
3475 	/* append to socket */
3476 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3477 	    0, 0, stcb->asoc.context, 0, 0, 0,
3478 	    m_notify);
3479 	if (control == NULL) {
3480 		/* no memory */
3481 		sctp_m_freem(m_notify);
3482 		return;
3483 	}
3484 	control->spec_flags = M_NOTIFICATION;
3485 	control->length = SCTP_BUF_LEN(m_notify);
3486 	/* not that we need this */
3487 	control->tail_mbuf = m_notify;
3488 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3489 	    control,
3490 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3491 }
3492 
3493 
/*
 * Queue an SCTP_REMOTE_ERROR notification, optionally carrying a copy of
 * the peer's ERROR chunk ('chunk' may be NULL).  If the full-sized mbuf
 * allocation fails, a smaller one without the chunk payload is attempted
 * before giving up.  No-op unless SCTP_PCB_FLAGS_RECVPEERERR is enabled.
 *
 * NOTE(review): chunk_len is taken from the chunk's own (untrusted) length
 * field; callers presumably validated it against the received packet —
 * confirm at the call sites.
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	size_t notif_len, chunk_len;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		/* event not enabled */
		return;
	}
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
	} else {
		chunk_len = 0;
	}
	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		notif_len = sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	memset(sre, 0, notif_len);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	/* Only append the chunk if the full-sized allocation succeeded. */
	if (notif_len > sizeof(struct sctp_remote_error)) {
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		/* not that we need this */
		control->tail_mbuf = m_notify;
		control->spec_flags = M_NOTIFICATION;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		/* no memory for a read-queue entry */
		sctp_m_freem(m_notify);
	}
}
3550 
3551 
3552 void
3553 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3554     uint32_t error, void *data, int so_locked
3555 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3556     SCTP_UNUSED
3557 #endif
3558 )
3559 {
3560 	if ((stcb == NULL) ||
3561 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3562 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3563 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3564 		/* If the socket is gone we are out of here */
3565 		return;
3566 	}
3567 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3568 		return;
3569 	}
3570 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3571 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3572 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3573 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3574 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3575 			/* Don't report these in front states */
3576 			return;
3577 		}
3578 	}
3579 	switch (notification) {
3580 	case SCTP_NOTIFY_ASSOC_UP:
3581 		if (stcb->asoc.assoc_up_sent == 0) {
3582 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3583 			stcb->asoc.assoc_up_sent = 1;
3584 		}
3585 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3586 			sctp_notify_adaptation_layer(stcb);
3587 		}
3588 		if (stcb->asoc.auth_supported == 0) {
3589 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3590 			    NULL, so_locked);
3591 		}
3592 		break;
3593 	case SCTP_NOTIFY_ASSOC_DOWN:
3594 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3595 		break;
3596 	case SCTP_NOTIFY_INTERFACE_DOWN:
3597 		{
3598 			struct sctp_nets *net;
3599 
3600 			net = (struct sctp_nets *)data;
3601 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3602 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3603 			break;
3604 		}
3605 	case SCTP_NOTIFY_INTERFACE_UP:
3606 		{
3607 			struct sctp_nets *net;
3608 
3609 			net = (struct sctp_nets *)data;
3610 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3611 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3612 			break;
3613 		}
3614 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3615 		{
3616 			struct sctp_nets *net;
3617 
3618 			net = (struct sctp_nets *)data;
3619 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3620 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3621 			break;
3622 		}
3623 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3624 		sctp_notify_send_failed2(stcb, error,
3625 		    (struct sctp_stream_queue_pending *)data, so_locked);
3626 		break;
3627 	case SCTP_NOTIFY_SENT_DG_FAIL:
3628 		sctp_notify_send_failed(stcb, 1, error,
3629 		    (struct sctp_tmit_chunk *)data, so_locked);
3630 		break;
3631 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3632 		sctp_notify_send_failed(stcb, 0, error,
3633 		    (struct sctp_tmit_chunk *)data, so_locked);
3634 		break;
3635 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3636 		{
3637 			uint32_t val;
3638 
3639 			val = *((uint32_t *) data);
3640 
3641 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3642 			break;
3643 		}
3644 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3645 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3646 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3647 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3648 		} else {
3649 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3650 		}
3651 		break;
3652 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3653 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3654 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3655 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3656 		} else {
3657 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3658 		}
3659 		break;
3660 	case SCTP_NOTIFY_ASSOC_RESTART:
3661 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3662 		if (stcb->asoc.auth_supported == 0) {
3663 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3664 			    NULL, so_locked);
3665 		}
3666 		break;
3667 	case SCTP_NOTIFY_STR_RESET_SEND:
3668 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3669 		break;
3670 	case SCTP_NOTIFY_STR_RESET_RECV:
3671 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3672 		break;
3673 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3674 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3675 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3676 		break;
3677 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3678 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3679 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3680 		break;
3681 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3682 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3683 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3684 		break;
3685 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3686 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3687 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3688 		break;
3689 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3690 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3691 		    error, so_locked);
3692 		break;
3693 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3694 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3695 		    error, so_locked);
3696 		break;
3697 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3698 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3699 		    error, so_locked);
3700 		break;
3701 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3702 		sctp_notify_shutdown_event(stcb);
3703 		break;
3704 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3705 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3706 		    (uint16_t) (uintptr_t) data,
3707 		    so_locked);
3708 		break;
3709 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3710 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3711 		    (uint16_t) (uintptr_t) data,
3712 		    so_locked);
3713 		break;
3714 	case SCTP_NOTIFY_NO_PEER_AUTH:
3715 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3716 		    (uint16_t) (uintptr_t) data,
3717 		    so_locked);
3718 		break;
3719 	case SCTP_NOTIFY_SENDER_DRY:
3720 		sctp_notify_sender_dry_event(stcb, so_locked);
3721 		break;
3722 	case SCTP_NOTIFY_REMOTE_ERROR:
3723 		sctp_notify_remote_error(stcb, error, data);
3724 		break;
3725 	default:
3726 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3727 		    __FUNCTION__, notification, notification);
3728 		break;
3729 	}			/* end switch */
3730 }
3731 
/*
 * Drain every outbound queue of the association (sent queue, send queue,
 * and each stream's pending queue), notifying the ULP for every chunk or
 * pending message that still carried data: SENT_DG_FAIL for the sent
 * queue, UNSENT_DG_FAIL for the send queue, SPECIAL_SP_FAIL for stream
 * queue entries.  Used when the association is being torn down (e.g. on
 * ABORT).
 *
 * 'holds_lock' non-zero means the caller already holds the TCB send lock;
 * otherwise it is taken/released here.  Returns immediately if the assoc
 * is already being freed or the socket is gone/closed.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* Keep the per-stream accounting consistent. */
			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* Notify may have stolen the data; free what remains. */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* Notify may have stolen the data; free what remains. */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* Notify may have stolen sp->data. */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3843 
3844 void
3845 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3846     struct sctp_abort_chunk *abort, int so_locked
3847 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3848     SCTP_UNUSED
3849 #endif
3850 )
3851 {
3852 	if (stcb == NULL) {
3853 		return;
3854 	}
3855 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3856 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3857 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3858 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3859 	}
3860 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3861 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3862 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3863 		return;
3864 	}
3865 	/* Tell them we lost the asoc */
3866 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3867 	if (from_peer) {
3868 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3869 	} else {
3870 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3871 	}
3872 }
3873 
/*
 * Abort an (optional) association and send an ABORT for the packet in
 * (m, iphlen).  If stcb is non-NULL, the ULP is notified, the peer's
 * vtag is used in the ABORT, and the TCB is freed afterwards.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Acquire the socket lock before freeing: take a ref so
		 * the TCB survives while we drop/retake its lock in the
		 * required lock order (socket lock before TCB lock).
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* Only established associations count in sctps_currestab. */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3923 
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the per-association inbound and outbound TSN logs.
 * The in/out logs are circular buffers of SCTP_TSN_LOG_SIZE entries; the
 * *_wrapped flag says the buffer has filled at least once, in which case
 * the oldest entries start at *_at and the newest end just before it.
 * NOTE(review): "NOSIY_PRINTS" looks like a misspelling of "NOISY_PRINTS";
 * since it is never defined, the body compiles away — confirm before
 * renaming, as it is a build-time knob.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* print the older half: from the write cursor to the end */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		/* then the newer half: from the start up to the cursor */
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3987 
/*
 * Abort an existing association: notify the ULP (unless the socket is
 * gone), send an ABORT chunk built from op_err to the peer, and free the
 * TCB.  With a NULL stcb it only garbage-collects an inp whose socket is
 * already gone.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				/* last association gone: reap the endpoint */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* Only established associations count in sctps_currestab. */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * sctp_free_assoc() needs the socket lock held; take a ref and
	 * drop/retake the TCB lock to respect the socket-before-TCB
	 * lock order.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4049 
4050 void
4051 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4052     struct sockaddr *src, struct sockaddr *dst,
4053     struct sctphdr *sh, struct sctp_inpcb *inp,
4054     struct mbuf *cause,
4055     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4056     uint32_t vrf_id, uint16_t port)
4057 {
4058 	struct sctp_chunkhdr *ch, chunk_buf;
4059 	unsigned int chk_length;
4060 	int contains_init_chunk;
4061 
4062 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4063 	/* Generate a TO address for future reference */
4064 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4065 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4066 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4067 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4068 		}
4069 	}
4070 	contains_init_chunk = 0;
4071 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4072 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4073 	while (ch != NULL) {
4074 		chk_length = ntohs(ch->chunk_length);
4075 		if (chk_length < sizeof(*ch)) {
4076 			/* break to abort land */
4077 			break;
4078 		}
4079 		switch (ch->chunk_type) {
4080 		case SCTP_INIT:
4081 			contains_init_chunk = 1;
4082 			break;
4083 		case SCTP_PACKET_DROPPED:
4084 			/* we don't respond to pkt-dropped */
4085 			return;
4086 		case SCTP_ABORT_ASSOCIATION:
4087 			/* we don't respond with an ABORT to an ABORT */
4088 			return;
4089 		case SCTP_SHUTDOWN_COMPLETE:
4090 			/*
4091 			 * we ignore it since we are not waiting for it and
4092 			 * peer is gone
4093 			 */
4094 			return;
4095 		case SCTP_SHUTDOWN_ACK:
4096 			sctp_send_shutdown_complete2(src, dst, sh,
4097 			    mflowtype, mflowid, fibnum,
4098 			    vrf_id, port);
4099 			return;
4100 		default:
4101 			break;
4102 		}
4103 		offset += SCTP_SIZE32(chk_length);
4104 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4105 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4106 	}
4107 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4108 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4109 	    (contains_init_chunk == 0))) {
4110 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4111 		    mflowtype, mflowid, fibnum,
4112 		    vrf_id, port);
4113 	}
4114 }
4115 
4116 /*
4117  * check the inbound datagram to make sure there is not an abort inside it,
4118  * if there is return 1, else return 0.
4119  */
4120 int
4121 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4122 {
4123 	struct sctp_chunkhdr *ch;
4124 	struct sctp_init_chunk *init_chk, chunk_buf;
4125 	int offset;
4126 	unsigned int chk_length;
4127 
4128 	offset = iphlen + sizeof(struct sctphdr);
4129 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4130 	    (uint8_t *) & chunk_buf);
4131 	while (ch != NULL) {
4132 		chk_length = ntohs(ch->chunk_length);
4133 		if (chk_length < sizeof(*ch)) {
4134 			/* packet is probably corrupt */
4135 			break;
4136 		}
4137 		/* we seem to be ok, is it an abort? */
4138 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4139 			/* yep, tell them */
4140 			return (1);
4141 		}
4142 		if (ch->chunk_type == SCTP_INITIATION) {
4143 			/* need to update the Vtag */
4144 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4145 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4146 			if (init_chk != NULL) {
4147 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4148 			}
4149 		}
4150 		/* Nope, move to the next chunk */
4151 		offset += SCTP_SIZE32(chk_length);
4152 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4153 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4154 	}
4155 	return (0);
4156 }
4157 
4158 /*
4159  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4160  * set (i.e. it's 0) so, create this function to compare link local scopes
4161  */
4162 #ifdef INET6
4163 uint32_t
4164 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4165 {
4166 	struct sockaddr_in6 a, b;
4167 
4168 	/* save copies */
4169 	a = *addr1;
4170 	b = *addr2;
4171 
4172 	if (a.sin6_scope_id == 0)
4173 		if (sa6_recoverscope(&a)) {
4174 			/* can't get scope, so can't match */
4175 			return (0);
4176 		}
4177 	if (b.sin6_scope_id == 0)
4178 		if (sa6_recoverscope(&b)) {
4179 			/* can't get scope, so can't match */
4180 			return (0);
4181 		}
4182 	if (a.sin6_scope_id != b.sin6_scope_id)
4183 		return (0);
4184 
4185 	return (1);
4186 }
4187 
4188 /*
4189  * returns a sockaddr_in6 with embedded scope recovered and removed
4190  */
4191 struct sockaddr_in6 *
4192 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4193 {
4194 	/* check and strip embedded scope junk */
4195 	if (addr->sin6_family == AF_INET6) {
4196 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4197 			if (addr->sin6_scope_id == 0) {
4198 				*store = *addr;
4199 				if (!sa6_recoverscope(store)) {
4200 					/* use the recovered scope */
4201 					addr = store;
4202 				}
4203 			} else {
4204 				/* else, return the original "to" addr */
4205 				in6_clearscope(&addr->sin6_addr);
4206 			}
4207 		}
4208 	}
4209 	return (addr);
4210 }
4211 
4212 #endif
4213 
4214 /*
4215  * are the two addresses the same?  currently a "scopeless" check returns: 1
4216  * if same, 0 if not
4217  */
4218 int
4219 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4220 {
4221 
4222 	/* must be valid */
4223 	if (sa1 == NULL || sa2 == NULL)
4224 		return (0);
4225 
4226 	/* must be the same family */
4227 	if (sa1->sa_family != sa2->sa_family)
4228 		return (0);
4229 
4230 	switch (sa1->sa_family) {
4231 #ifdef INET6
4232 	case AF_INET6:
4233 		{
4234 			/* IPv6 addresses */
4235 			struct sockaddr_in6 *sin6_1, *sin6_2;
4236 
4237 			sin6_1 = (struct sockaddr_in6 *)sa1;
4238 			sin6_2 = (struct sockaddr_in6 *)sa2;
4239 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4240 			    sin6_2));
4241 		}
4242 #endif
4243 #ifdef INET
4244 	case AF_INET:
4245 		{
4246 			/* IPv4 addresses */
4247 			struct sockaddr_in *sin_1, *sin_2;
4248 
4249 			sin_1 = (struct sockaddr_in *)sa1;
4250 			sin_2 = (struct sockaddr_in *)sa2;
4251 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4252 		}
4253 #endif
4254 	default:
4255 		/* we don't do these... */
4256 		return (0);
4257 	}
4258 }
4259 
4260 void
4261 sctp_print_address(struct sockaddr *sa)
4262 {
4263 #ifdef INET6
4264 	char ip6buf[INET6_ADDRSTRLEN];
4265 
4266 #endif
4267 
4268 	switch (sa->sa_family) {
4269 #ifdef INET6
4270 	case AF_INET6:
4271 		{
4272 			struct sockaddr_in6 *sin6;
4273 
4274 			sin6 = (struct sockaddr_in6 *)sa;
4275 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4276 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4277 			    ntohs(sin6->sin6_port),
4278 			    sin6->sin6_scope_id);
4279 			break;
4280 		}
4281 #endif
4282 #ifdef INET
4283 	case AF_INET:
4284 		{
4285 			struct sockaddr_in *sin;
4286 			unsigned char *p;
4287 
4288 			sin = (struct sockaddr_in *)sa;
4289 			p = (unsigned char *)&sin->sin_addr;
4290 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4291 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4292 			break;
4293 		}
4294 #endif
4295 	default:
4296 		SCTP_PRINTF("?\n");
4297 		break;
4298 	}
4299 }
4300 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* serialize against concurrent readers of the old socket buffer */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit the old socket's receive buffer accounting */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit the new socket's receive buffer accounting */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4376 
/*
 * Queue a fully built control block on inp's read queue and account its
 * mbuf chain against the socket buffer sb, then wake any reader.  Empty
 * mbufs are pruned from the chain first; if everything prunes away the
 * control is freed instead of queued.  "end" marks the message complete.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* nobody can read anymore: release everything and bail */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* notifications do not count as user-visible receives */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		/* charge this mbuf to the socket buffer */
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* finally, wake up any reader waiting on the socket */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Take a ref and re-lock in socket-first
				 * order before the wakeup.
				 */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4502 
4503 
/*
 * Append mbuf chain m to an in-progress control block (partial delivery
 * or reassembly).  Empty mbufs are pruned; when sb is non-NULL each kept
 * mbuf is charged to that socket buffer.  Returns 0 on success, -1 when
 * the control is missing/complete or the chain prunes to nothing.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* reader is gone: silently drop (m is not freed here) */
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* prune zero-length mbufs and account the rest */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			/* close out the partial-delivery session */
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* wake any reader waiting on the socket */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			/*
			 * Take a ref and re-lock in socket-first order
			 * before the wakeup.
			 */
			so = SCTP_INP_SO(inp);
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4653 
4654 
4655 
4656 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4657  *************ALTERNATE ROUTING CODE
4658  */
4659 
4660 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4661  *************ALTERNATE ROUTING CODE
4662  */
4663 
4664 struct mbuf *
4665 sctp_generate_cause(uint16_t code, char *info)
4666 {
4667 	struct mbuf *m;
4668 	struct sctp_gen_error_cause *cause;
4669 	size_t info_len, len;
4670 
4671 	if ((code == 0) || (info == NULL)) {
4672 		return (NULL);
4673 	}
4674 	info_len = strlen(info);
4675 	len = sizeof(struct sctp_paramhdr) + info_len;
4676 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4677 	if (m != NULL) {
4678 		SCTP_BUF_LEN(m) = len;
4679 		cause = mtod(m, struct sctp_gen_error_cause *);
4680 		cause->code = htons(code);
4681 		cause->length = htons((uint16_t) len);
4682 		memcpy(cause->info, info, info_len);
4683 	}
4684 	return (m);
4685 }
4686 
4687 struct mbuf *
4688 sctp_generate_no_user_data_cause(uint32_t tsn)
4689 {
4690 	struct mbuf *m;
4691 	struct sctp_error_no_user_data *no_user_data_cause;
4692 	size_t len;
4693 
4694 	len = sizeof(struct sctp_error_no_user_data);
4695 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4696 	if (m != NULL) {
4697 		SCTP_BUF_LEN(m) = len;
4698 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4699 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4700 		no_user_data_cause->cause.length = htons((uint16_t) len);
4701 		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4702 	}
4703 	return (m);
4704 }
4705 
#ifdef SCTP_MBCNT_LOGGING
/*
 * Release the buffer-space accounting held by chunk tp1: debit the
 * association's output-queue counters and, for 1-to-1 style sockets,
 * the socket send buffer.  This variant also emits MBCNT log records.
 * No-op when the chunk carries no data.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero instead of letting the gauge wrap */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* only 1-to-1 style sockets charge the socket send buffer */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}

#endif
4740 
4741 int
4742 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4743     uint8_t sent, int so_locked
4744 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4745     SCTP_UNUSED
4746 #endif
4747 )
4748 {
4749 	struct sctp_stream_out *strq;
4750 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4751 	struct sctp_stream_queue_pending *sp;
4752 	uint16_t stream = 0, seq = 0;
4753 	uint8_t foundeom = 0;
4754 	int ret_sz = 0;
4755 	int notdone;
4756 	int do_wakeup_routine = 0;
4757 
4758 	stream = tp1->rec.data.stream_number;
4759 	seq = tp1->rec.data.stream_seq;
4760 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4761 		stcb->asoc.abandoned_sent[0]++;
4762 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4763 		stcb->asoc.strmout[stream].abandoned_sent[0]++;
4764 #if defined(SCTP_DETAILED_STR_STATS)
4765 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4766 #endif
4767 	} else {
4768 		stcb->asoc.abandoned_unsent[0]++;
4769 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4770 		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
4771 #if defined(SCTP_DETAILED_STR_STATS)
4772 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4773 #endif
4774 	}
4775 	do {
4776 		ret_sz += tp1->book_size;
4777 		if (tp1->data != NULL) {
4778 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4779 				sctp_flight_size_decrease(tp1);
4780 				sctp_total_flight_decrease(stcb, tp1);
4781 			}
4782 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4783 			stcb->asoc.peers_rwnd += tp1->send_size;
4784 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4785 			if (sent) {
4786 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4787 			} else {
4788 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4789 			}
4790 			if (tp1->data) {
4791 				sctp_m_freem(tp1->data);
4792 				tp1->data = NULL;
4793 			}
4794 			do_wakeup_routine = 1;
4795 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4796 				stcb->asoc.sent_queue_cnt_removeable--;
4797 			}
4798 		}
4799 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4800 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4801 		    SCTP_DATA_NOT_FRAG) {
4802 			/* not frag'ed we ae done   */
4803 			notdone = 0;
4804 			foundeom = 1;
4805 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4806 			/* end of frag, we are done */
4807 			notdone = 0;
4808 			foundeom = 1;
4809 		} else {
4810 			/*
4811 			 * Its a begin or middle piece, we must mark all of
4812 			 * it
4813 			 */
4814 			notdone = 1;
4815 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4816 		}
4817 	} while (tp1 && notdone);
4818 	if (foundeom == 0) {
4819 		/*
4820 		 * The multi-part message was scattered across the send and
4821 		 * sent queue.
4822 		 */
4823 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4824 			if ((tp1->rec.data.stream_number != stream) ||
4825 			    (tp1->rec.data.stream_seq != seq)) {
4826 				break;
4827 			}
4828 			/*
4829 			 * save to chk in case we have some on stream out
4830 			 * queue. If so and we have an un-transmitted one we
4831 			 * don't have to fudge the TSN.
4832 			 */
4833 			chk = tp1;
4834 			ret_sz += tp1->book_size;
4835 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4836 			if (sent) {
4837 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4838 			} else {
4839 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4840 			}
4841 			if (tp1->data) {
4842 				sctp_m_freem(tp1->data);
4843 				tp1->data = NULL;
4844 			}
4845 			/* No flight involved here book the size to 0 */
4846 			tp1->book_size = 0;
4847 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4848 				foundeom = 1;
4849 			}
4850 			do_wakeup_routine = 1;
4851 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4852 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4853 			/*
4854 			 * on to the sent queue so we can wait for it to be
4855 			 * passed by.
4856 			 */
4857 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4858 			    sctp_next);
4859 			stcb->asoc.send_queue_cnt--;
4860 			stcb->asoc.sent_queue_cnt++;
4861 		}
4862 	}
4863 	if (foundeom == 0) {
4864 		/*
4865 		 * Still no eom found. That means there is stuff left on the
4866 		 * stream out queue.. yuck.
4867 		 */
4868 		SCTP_TCB_SEND_LOCK(stcb);
4869 		strq = &stcb->asoc.strmout[stream];
4870 		sp = TAILQ_FIRST(&strq->outqueue);
4871 		if (sp != NULL) {
4872 			sp->discard_rest = 1;
4873 			/*
4874 			 * We may need to put a chunk on the queue that
4875 			 * holds the TSN that would have been sent with the
4876 			 * LAST bit.
4877 			 */
4878 			if (chk == NULL) {
4879 				/* Yep, we have to */
4880 				sctp_alloc_a_chunk(stcb, chk);
4881 				if (chk == NULL) {
4882 					/*
4883 					 * we are hosed. All we can do is
4884 					 * nothing.. which will cause an
4885 					 * abort if the peer is paying
4886 					 * attention.
4887 					 */
4888 					goto oh_well;
4889 				}
4890 				memset(chk, 0, sizeof(*chk));
4891 				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4892 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4893 				chk->asoc = &stcb->asoc;
4894 				chk->rec.data.stream_seq = strq->next_sequence_send;
4895 				chk->rec.data.stream_number = sp->stream;
4896 				chk->rec.data.payloadtype = sp->ppid;
4897 				chk->rec.data.context = sp->context;
4898 				chk->flags = sp->act_flags;
4899 				chk->whoTo = NULL;
4900 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4901 				strq->chunks_on_queues++;
4902 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4903 				stcb->asoc.sent_queue_cnt++;
4904 				stcb->asoc.pr_sctp_cnt++;
4905 			} else {
4906 				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4907 			}
4908 			strq->next_sequence_send++;
4909 	oh_well:
4910 			if (sp->data) {
4911 				/*
4912 				 * Pull any data to free up the SB and allow
4913 				 * sender to "add more" while we will throw
4914 				 * away :-)
4915 				 */
4916 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4917 				ret_sz += sp->length;
4918 				do_wakeup_routine = 1;
4919 				sp->some_taken = 1;
4920 				sctp_m_freem(sp->data);
4921 				sp->data = NULL;
4922 				sp->tail_mbuf = NULL;
4923 				sp->length = 0;
4924 			}
4925 		}
4926 		SCTP_TCB_SEND_UNLOCK(stcb);
4927 	}
4928 	if (do_wakeup_routine) {
4929 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4930 		struct socket *so;
4931 
4932 		so = SCTP_INP_SO(stcb->sctp_ep);
4933 		if (!so_locked) {
4934 			atomic_add_int(&stcb->asoc.refcnt, 1);
4935 			SCTP_TCB_UNLOCK(stcb);
4936 			SCTP_SOCKET_LOCK(so, 1);
4937 			SCTP_TCB_LOCK(stcb);
4938 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4939 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4940 				/* assoc was freed while we were unlocked */
4941 				SCTP_SOCKET_UNLOCK(so, 1);
4942 				return (ret_sz);
4943 			}
4944 		}
4945 #endif
4946 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4947 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4948 		if (!so_locked) {
4949 			SCTP_SOCKET_UNLOCK(so, 1);
4950 		}
4951 #endif
4952 	}
4953 	return (ret_sz);
4954 }
4955 
4956 /*
4957  * checks to see if the given address, sa, is one that is currently known by
4958  * the kernel note: can't distinguish the same address on multiple interfaces
4959  * and doesn't handle multiple addresses with different zone/scope id's note:
4960  * ifa_ifwithaddr() compares the entire sockaddr struct
4961  */
4962 struct sctp_ifa *
4963 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4964     int holds_lock)
4965 {
4966 	struct sctp_laddr *laddr;
4967 
4968 	if (holds_lock == 0) {
4969 		SCTP_INP_RLOCK(inp);
4970 	}
4971 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4972 		if (laddr->ifa == NULL)
4973 			continue;
4974 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4975 			continue;
4976 #ifdef INET
4977 		if (addr->sa_family == AF_INET) {
4978 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4979 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4980 				/* found him. */
4981 				if (holds_lock == 0) {
4982 					SCTP_INP_RUNLOCK(inp);
4983 				}
4984 				return (laddr->ifa);
4985 				break;
4986 			}
4987 		}
4988 #endif
4989 #ifdef INET6
4990 		if (addr->sa_family == AF_INET6) {
4991 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4992 			    &laddr->ifa->address.sin6)) {
4993 				/* found him. */
4994 				if (holds_lock == 0) {
4995 					SCTP_INP_RUNLOCK(inp);
4996 				}
4997 				return (laddr->ifa);
4998 				break;
4999 			}
5000 		}
5001 #endif
5002 	}
5003 	if (holds_lock == 0) {
5004 		SCTP_INP_RUNLOCK(inp);
5005 	}
5006 	return (NULL);
5007 }
5008 
/*
 * Compute the address-hash value used to pick a bucket in the per-VRF
 * address hash table.  Unknown address families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			uint32_t v;

			v = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
			/* Fold the upper 16 bits into the lower 16 bits. */
			return (v ^ (v >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *s6;
			uint32_t sum;

			s6 = (struct sockaddr_in6 *)addr;
			/* Sum the four 32-bit words of the v6 address... */
			sum = s6->sin6_addr.s6_addr32[0];
			sum += s6->sin6_addr.s6_addr32[1];
			sum += s6->sin6_addr.s6_addr32[2];
			sum += s6->sin6_addr.s6_addr32[3];
			/* ...then fold the upper half into the lower half. */
			return (sum ^ (sum >> 16));
		}
#endif
	default:
		break;
	}
	return (0);
}
5042 
5043 struct sctp_ifa *
5044 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5045 {
5046 	struct sctp_ifa *sctp_ifap;
5047 	struct sctp_vrf *vrf;
5048 	struct sctp_ifalist *hash_head;
5049 	uint32_t hash_of_addr;
5050 
5051 	if (holds_lock == 0)
5052 		SCTP_IPI_ADDR_RLOCK();
5053 
5054 	vrf = sctp_find_vrf(vrf_id);
5055 	if (vrf == NULL) {
5056 		if (holds_lock == 0)
5057 			SCTP_IPI_ADDR_RUNLOCK();
5058 		return (NULL);
5059 	}
5060 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5061 
5062 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5063 	if (hash_head == NULL) {
5064 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5065 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5066 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5067 		sctp_print_address(addr);
5068 		SCTP_PRINTF("No such bucket for address\n");
5069 		if (holds_lock == 0)
5070 			SCTP_IPI_ADDR_RUNLOCK();
5071 
5072 		return (NULL);
5073 	}
5074 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5075 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5076 			continue;
5077 #ifdef INET
5078 		if (addr->sa_family == AF_INET) {
5079 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5080 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5081 				/* found him. */
5082 				if (holds_lock == 0)
5083 					SCTP_IPI_ADDR_RUNLOCK();
5084 				return (sctp_ifap);
5085 				break;
5086 			}
5087 		}
5088 #endif
5089 #ifdef INET6
5090 		if (addr->sa_family == AF_INET6) {
5091 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5092 			    &sctp_ifap->address.sin6)) {
5093 				/* found him. */
5094 				if (holds_lock == 0)
5095 					SCTP_IPI_ADDR_RUNLOCK();
5096 				return (sctp_ifap);
5097 				break;
5098 			}
5099 		}
5100 #endif
5101 	}
5102 	if (holds_lock == 0)
5103 		SCTP_IPI_ADDR_RUNLOCK();
5104 	return (NULL);
5105 }
5106 
/*
 * Called after the application has consumed data from the receive path.
 * Decides whether the receive window has opened up enough (at least
 * 'rwnd_req' bytes beyond what was last reported to the peer) to be worth
 * sending a window-update SACK right away; otherwise just accumulates the
 * freed byte count for next time.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the association so it cannot be freed while we work. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		/* Socket is going away; nothing to report against. */
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's running total into the per-assoc counter. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		/* How much the window has grown since we last told the peer. */
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * Worth an immediate window update.  Drop the inp read lock
		 * if the caller holds it; presumably required by lock
		 * ordering before taking the TCB lock below -- TODO confirm.
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock: state may have changed. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		/* Send the window-update SACK and push any queued output. */
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-take the read lock if we dropped one the caller was holding. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Release the association reference taken at entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5189 
5190 int
5191 sctp_sorecvmsg(struct socket *so,
5192     struct uio *uio,
5193     struct mbuf **mp,
5194     struct sockaddr *from,
5195     int fromlen,
5196     int *msg_flags,
5197     struct sctp_sndrcvinfo *sinfo,
5198     int filling_sinfo)
5199 {
5200 	/*
5201 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5202 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5203 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5204 	 * On the way out we may send out any combination of:
5205 	 * MSG_NOTIFICATION MSG_EOR
5206 	 *
5207 	 */
5208 	struct sctp_inpcb *inp = NULL;
5209 	int my_len = 0;
5210 	int cp_len = 0, error = 0;
5211 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5212 	struct mbuf *m = NULL;
5213 	struct sctp_tcb *stcb = NULL;
5214 	int wakeup_read_socket = 0;
5215 	int freecnt_applied = 0;
5216 	int out_flags = 0, in_flags = 0;
5217 	int block_allowed = 1;
5218 	uint32_t freed_so_far = 0;
5219 	uint32_t copied_so_far = 0;
5220 	int in_eeor_mode = 0;
5221 	int no_rcv_needed = 0;
5222 	uint32_t rwnd_req = 0;
5223 	int hold_sblock = 0;
5224 	int hold_rlock = 0;
5225 	int slen = 0;
5226 	uint32_t held_length = 0;
5227 	int sockbuf_lock = 0;
5228 
5229 	if (uio == NULL) {
5230 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5231 		return (EINVAL);
5232 	}
5233 	if (msg_flags) {
5234 		in_flags = *msg_flags;
5235 		if (in_flags & MSG_PEEK)
5236 			SCTP_STAT_INCR(sctps_read_peeks);
5237 	} else {
5238 		in_flags = 0;
5239 	}
5240 	slen = uio->uio_resid;
5241 
5242 	/* Pull in and set up our int flags */
5243 	if (in_flags & MSG_OOB) {
5244 		/* Out of band's NOT supported */
5245 		return (EOPNOTSUPP);
5246 	}
5247 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5248 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5249 		return (EINVAL);
5250 	}
5251 	if ((in_flags & (MSG_DONTWAIT
5252 	    | MSG_NBIO
5253 	    )) ||
5254 	    SCTP_SO_IS_NBIO(so)) {
5255 		block_allowed = 0;
5256 	}
5257 	/* setup the endpoint */
5258 	inp = (struct sctp_inpcb *)so->so_pcb;
5259 	if (inp == NULL) {
5260 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5261 		return (EFAULT);
5262 	}
5263 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5264 	/* Must be at least a MTU's worth */
5265 	if (rwnd_req < SCTP_MIN_RWND)
5266 		rwnd_req = SCTP_MIN_RWND;
5267 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5268 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5269 		sctp_misc_ints(SCTP_SORECV_ENTER,
5270 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5271 	}
5272 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5273 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5274 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5275 	}
5276 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5277 	if (error) {
5278 		goto release_unlocked;
5279 	}
5280 	sockbuf_lock = 1;
5281 restart:
5282 
5283 
5284 restart_nosblocks:
5285 	if (hold_sblock == 0) {
5286 		SOCKBUF_LOCK(&so->so_rcv);
5287 		hold_sblock = 1;
5288 	}
5289 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5290 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5291 		goto out;
5292 	}
5293 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5294 		if (so->so_error) {
5295 			error = so->so_error;
5296 			if ((in_flags & MSG_PEEK) == 0)
5297 				so->so_error = 0;
5298 			goto out;
5299 		} else {
5300 			if (so->so_rcv.sb_cc == 0) {
5301 				/* indicate EOF */
5302 				error = 0;
5303 				goto out;
5304 			}
5305 		}
5306 	}
5307 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5308 		/* we need to wait for data */
5309 		if ((so->so_rcv.sb_cc == 0) &&
5310 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5311 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5312 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5313 				/*
5314 				 * For active open side clear flags for
5315 				 * re-use passive open is blocked by
5316 				 * connect.
5317 				 */
5318 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5319 					/*
5320 					 * You were aborted, passive side
5321 					 * always hits here
5322 					 */
5323 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5324 					error = ECONNRESET;
5325 				}
5326 				so->so_state &= ~(SS_ISCONNECTING |
5327 				    SS_ISDISCONNECTING |
5328 				    SS_ISCONFIRMING |
5329 				    SS_ISCONNECTED);
5330 				if (error == 0) {
5331 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5332 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5333 						error = ENOTCONN;
5334 					}
5335 				}
5336 				goto out;
5337 			}
5338 		}
5339 		error = sbwait(&so->so_rcv);
5340 		if (error) {
5341 			goto out;
5342 		}
5343 		held_length = 0;
5344 		goto restart_nosblocks;
5345 	} else if (so->so_rcv.sb_cc == 0) {
5346 		if (so->so_error) {
5347 			error = so->so_error;
5348 			if ((in_flags & MSG_PEEK) == 0)
5349 				so->so_error = 0;
5350 		} else {
5351 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5352 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5353 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5354 					/*
5355 					 * For active open side clear flags
5356 					 * for re-use passive open is
5357 					 * blocked by connect.
5358 					 */
5359 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5360 						/*
5361 						 * You were aborted, passive
5362 						 * side always hits here
5363 						 */
5364 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5365 						error = ECONNRESET;
5366 					}
5367 					so->so_state &= ~(SS_ISCONNECTING |
5368 					    SS_ISDISCONNECTING |
5369 					    SS_ISCONFIRMING |
5370 					    SS_ISCONNECTED);
5371 					if (error == 0) {
5372 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5373 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5374 							error = ENOTCONN;
5375 						}
5376 					}
5377 					goto out;
5378 				}
5379 			}
5380 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5381 			error = EWOULDBLOCK;
5382 		}
5383 		goto out;
5384 	}
5385 	if (hold_sblock == 1) {
5386 		SOCKBUF_UNLOCK(&so->so_rcv);
5387 		hold_sblock = 0;
5388 	}
5389 	/* we possibly have data we can read */
5390 	/* sa_ignore FREED_MEMORY */
5391 	control = TAILQ_FIRST(&inp->read_queue);
5392 	if (control == NULL) {
5393 		/*
5394 		 * This could be happening since the appender did the
5395 		 * increment but as not yet did the tailq insert onto the
5396 		 * read_queue
5397 		 */
5398 		if (hold_rlock == 0) {
5399 			SCTP_INP_READ_LOCK(inp);
5400 		}
5401 		control = TAILQ_FIRST(&inp->read_queue);
5402 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5403 #ifdef INVARIANTS
5404 			panic("Huh, its non zero and nothing on control?");
5405 #endif
5406 			so->so_rcv.sb_cc = 0;
5407 		}
5408 		SCTP_INP_READ_UNLOCK(inp);
5409 		hold_rlock = 0;
5410 		goto restart;
5411 	}
5412 	if ((control->length == 0) &&
5413 	    (control->do_not_ref_stcb)) {
5414 		/*
5415 		 * Clean up code for freeing assoc that left behind a
5416 		 * pdapi.. maybe a peer in EEOR that just closed after
5417 		 * sending and never indicated a EOR.
5418 		 */
5419 		if (hold_rlock == 0) {
5420 			hold_rlock = 1;
5421 			SCTP_INP_READ_LOCK(inp);
5422 		}
5423 		control->held_length = 0;
5424 		if (control->data) {
5425 			/* Hmm there is data here .. fix */
5426 			struct mbuf *m_tmp;
5427 			int cnt = 0;
5428 
5429 			m_tmp = control->data;
5430 			while (m_tmp) {
5431 				cnt += SCTP_BUF_LEN(m_tmp);
5432 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5433 					control->tail_mbuf = m_tmp;
5434 					control->end_added = 1;
5435 				}
5436 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5437 			}
5438 			control->length = cnt;
5439 		} else {
5440 			/* remove it */
5441 			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hidden data */
5443 			sctp_free_remote_addr(control->whoFrom);
5444 			sctp_free_a_readq(stcb, control);
5445 		}
5446 		if (hold_rlock) {
5447 			hold_rlock = 0;
5448 			SCTP_INP_READ_UNLOCK(inp);
5449 		}
5450 		goto restart;
5451 	}
5452 	if ((control->length == 0) &&
5453 	    (control->end_added == 1)) {
5454 		/*
5455 		 * Do we also need to check for (control->pdapi_aborted ==
5456 		 * 1)?
5457 		 */
5458 		if (hold_rlock == 0) {
5459 			hold_rlock = 1;
5460 			SCTP_INP_READ_LOCK(inp);
5461 		}
5462 		TAILQ_REMOVE(&inp->read_queue, control, next);
5463 		if (control->data) {
5464 #ifdef INVARIANTS
5465 			panic("control->data not null but control->length == 0");
5466 #else
5467 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5468 			sctp_m_freem(control->data);
5469 			control->data = NULL;
5470 #endif
5471 		}
5472 		if (control->aux_data) {
5473 			sctp_m_free(control->aux_data);
5474 			control->aux_data = NULL;
5475 		}
5476 		sctp_free_remote_addr(control->whoFrom);
5477 		sctp_free_a_readq(stcb, control);
5478 		if (hold_rlock) {
5479 			hold_rlock = 0;
5480 			SCTP_INP_READ_UNLOCK(inp);
5481 		}
5482 		goto restart;
5483 	}
5484 	if (control->length == 0) {
5485 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5486 		    (filling_sinfo)) {
5487 			/* find a more suitable one then this */
5488 			ctl = TAILQ_NEXT(control, next);
5489 			while (ctl) {
5490 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5491 				    (ctl->some_taken ||
5492 				    (ctl->spec_flags & M_NOTIFICATION) ||
5493 				    ((ctl->do_not_ref_stcb == 0) &&
5494 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5495 				    ) {
5496 					/*-
5497 					 * If we have a different TCB next, and there is data
5498 					 * present. If we have already taken some (pdapi), OR we can
5499 					 * ref the tcb and no delivery as started on this stream, we
5500 					 * take it. Note we allow a notification on a different
5501 					 * assoc to be delivered..
5502 					 */
5503 					control = ctl;
5504 					goto found_one;
5505 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5506 					    (ctl->length) &&
5507 					    ((ctl->some_taken) ||
5508 					    ((ctl->do_not_ref_stcb == 0) &&
5509 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5510 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5511 					/*-
5512 					 * If we have the same tcb, and there is data present, and we
5513 					 * have the strm interleave feature present. Then if we have
					 * taken some (pdapi) or we can refer to that tcb AND we have
					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notification on the same assoc to
5517 					 * be delivered.
5518 					 */
5519 					control = ctl;
5520 					goto found_one;
5521 				}
5522 				ctl = TAILQ_NEXT(ctl, next);
5523 			}
5524 		}
5525 		/*
5526 		 * if we reach here, not suitable replacement is available
5527 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5528 		 * into the our held count, and its time to sleep again.
5529 		 */
5530 		held_length = so->so_rcv.sb_cc;
5531 		control->held_length = so->so_rcv.sb_cc;
5532 		goto restart;
5533 	}
5534 	/* Clear the held length since there is something to read */
5535 	control->held_length = 0;
5536 	if (hold_rlock) {
5537 		SCTP_INP_READ_UNLOCK(inp);
5538 		hold_rlock = 0;
5539 	}
5540 found_one:
5541 	/*
5542 	 * If we reach here, control has a some data for us to read off.
5543 	 * Note that stcb COULD be NULL.
5544 	 */
5545 	control->some_taken++;
5546 	if (hold_sblock) {
5547 		SOCKBUF_UNLOCK(&so->so_rcv);
5548 		hold_sblock = 0;
5549 	}
5550 	stcb = control->stcb;
5551 	if (stcb) {
5552 		if ((control->do_not_ref_stcb == 0) &&
5553 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5554 			if (freecnt_applied == 0)
5555 				stcb = NULL;
5556 		} else if (control->do_not_ref_stcb == 0) {
5557 			/* you can't free it on me please */
5558 			/*
5559 			 * The lock on the socket buffer protects us so the
5560 			 * free code will stop. But since we used the
5561 			 * socketbuf lock and the sender uses the tcb_lock
5562 			 * to increment, we need to use the atomic add to
5563 			 * the refcnt
5564 			 */
5565 			if (freecnt_applied) {
5566 #ifdef INVARIANTS
5567 				panic("refcnt already incremented");
5568 #else
5569 				SCTP_PRINTF("refcnt already incremented?\n");
5570 #endif
5571 			} else {
5572 				atomic_add_int(&stcb->asoc.refcnt, 1);
5573 				freecnt_applied = 1;
5574 			}
5575 			/*
5576 			 * Setup to remember how much we have not yet told
5577 			 * the peer our rwnd has opened up. Note we grab the
5578 			 * value from the tcb from last time. Note too that
5579 			 * sack sending clears this when a sack is sent,
5580 			 * which is fine. Once we hit the rwnd_req, we then
5581 			 * will go to the sctp_user_rcvd() that will not
5582 			 * lock until it KNOWs it MUST send a WUP-SACK.
5583 			 */
5584 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5585 			stcb->freed_by_sorcv_sincelast = 0;
5586 		}
5587 	}
5588 	if (stcb &&
5589 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5590 	    control->do_not_ref_stcb == 0) {
5591 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5592 	}
5593 	/* First lets get off the sinfo and sockaddr info */
5594 	if ((sinfo) && filling_sinfo) {
5595 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5596 		nxt = TAILQ_NEXT(control, next);
5597 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5598 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5599 			struct sctp_extrcvinfo *s_extra;
5600 
5601 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5602 			if ((nxt) &&
5603 			    (nxt->length)) {
5604 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5605 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5606 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5607 				}
5608 				if (nxt->spec_flags & M_NOTIFICATION) {
5609 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5610 				}
5611 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5612 				s_extra->sreinfo_next_length = nxt->length;
5613 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5614 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5615 				if (nxt->tail_mbuf != NULL) {
5616 					if (nxt->end_added) {
5617 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5618 					}
5619 				}
5620 			} else {
5621 				/*
5622 				 * we explicitly 0 this, since the memcpy
5623 				 * got some other things beyond the older
5624 				 * sinfo_ that is on the control's structure
5625 				 * :-D
5626 				 */
5627 				nxt = NULL;
5628 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5629 				s_extra->sreinfo_next_aid = 0;
5630 				s_extra->sreinfo_next_length = 0;
5631 				s_extra->sreinfo_next_ppid = 0;
5632 				s_extra->sreinfo_next_stream = 0;
5633 			}
5634 		}
5635 		/*
5636 		 * update off the real current cum-ack, if we have an stcb.
5637 		 */
5638 		if ((control->do_not_ref_stcb == 0) && stcb)
5639 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5640 		/*
5641 		 * mask off the high bits, we keep the actual chunk bits in
5642 		 * there.
5643 		 */
5644 		sinfo->sinfo_flags &= 0x00ff;
5645 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5646 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5647 		}
5648 	}
5649 #ifdef SCTP_ASOCLOG_OF_TSNS
5650 	{
5651 		int index, newindex;
5652 		struct sctp_pcbtsn_rlog *entry;
5653 
5654 		do {
5655 			index = inp->readlog_index;
5656 			newindex = index + 1;
5657 			if (newindex >= SCTP_READ_LOG_SIZE) {
5658 				newindex = 0;
5659 			}
5660 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5661 		entry = &inp->readlog[index];
5662 		entry->vtag = control->sinfo_assoc_id;
5663 		entry->strm = control->sinfo_stream;
5664 		entry->seq = control->sinfo_ssn;
5665 		entry->sz = control->length;
5666 		entry->flgs = control->sinfo_flags;
5667 	}
5668 #endif
5669 	if ((fromlen > 0) && (from != NULL)) {
5670 		union sctp_sockstore store;
5671 		size_t len;
5672 
5673 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5674 #ifdef INET6
5675 		case AF_INET6:
5676 			len = sizeof(struct sockaddr_in6);
5677 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5678 			store.sin6.sin6_port = control->port_from;
5679 			break;
5680 #endif
5681 #ifdef INET
5682 		case AF_INET:
5683 #ifdef INET6
5684 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5685 				len = sizeof(struct sockaddr_in6);
5686 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5687 				    &store.sin6);
5688 				store.sin6.sin6_port = control->port_from;
5689 			} else {
5690 				len = sizeof(struct sockaddr_in);
5691 				store.sin = control->whoFrom->ro._l_addr.sin;
5692 				store.sin.sin_port = control->port_from;
5693 			}
5694 #else
5695 			len = sizeof(struct sockaddr_in);
5696 			store.sin = control->whoFrom->ro._l_addr.sin;
5697 			store.sin.sin_port = control->port_from;
5698 #endif
5699 			break;
5700 #endif
5701 		default:
5702 			len = 0;
5703 			break;
5704 		}
5705 		memcpy(from, &store, min((size_t)fromlen, len));
5706 #ifdef INET6
5707 		{
5708 			struct sockaddr_in6 lsa6, *from6;
5709 
5710 			from6 = (struct sockaddr_in6 *)from;
5711 			sctp_recover_scope_mac(from6, (&lsa6));
5712 		}
5713 #endif
5714 	}
5715 	/* now copy out what data we can */
5716 	if (mp == NULL) {
5717 		/* copy out each mbuf in the chain up to length */
5718 get_more_data:
5719 		m = control->data;
5720 		while (m) {
5721 			/* Move out all we can */
5722 			cp_len = (int)uio->uio_resid;
5723 			my_len = (int)SCTP_BUF_LEN(m);
5724 			if (cp_len > my_len) {
5725 				/* not enough in this buf */
5726 				cp_len = my_len;
5727 			}
5728 			if (hold_rlock) {
5729 				SCTP_INP_READ_UNLOCK(inp);
5730 				hold_rlock = 0;
5731 			}
5732 			if (cp_len > 0)
5733 				error = uiomove(mtod(m, char *), cp_len, uio);
5734 			/* re-read */
5735 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5736 				goto release;
5737 			}
5738 			if ((control->do_not_ref_stcb == 0) && stcb &&
5739 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5740 				no_rcv_needed = 1;
5741 			}
5742 			if (error) {
5743 				/* error we are out of here */
5744 				goto release;
5745 			}
5746 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5747 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5748 			    ((control->end_added == 0) ||
5749 			    (control->end_added &&
5750 			    (TAILQ_NEXT(control, next) == NULL)))
5751 			    ) {
5752 				SCTP_INP_READ_LOCK(inp);
5753 				hold_rlock = 1;
5754 			}
5755 			if (cp_len == SCTP_BUF_LEN(m)) {
5756 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5757 				    (control->end_added)) {
5758 					out_flags |= MSG_EOR;
5759 					if ((control->do_not_ref_stcb == 0) &&
5760 					    (control->stcb != NULL) &&
5761 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5762 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5763 				}
5764 				if (control->spec_flags & M_NOTIFICATION) {
5765 					out_flags |= MSG_NOTIFICATION;
5766 				}
5767 				/* we ate up the mbuf */
5768 				if (in_flags & MSG_PEEK) {
5769 					/* just looking */
5770 					m = SCTP_BUF_NEXT(m);
5771 					copied_so_far += cp_len;
5772 				} else {
5773 					/* dispose of the mbuf */
5774 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5775 						sctp_sblog(&so->so_rcv,
5776 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5777 					}
5778 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5779 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5780 						sctp_sblog(&so->so_rcv,
5781 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5782 					}
5783 					copied_so_far += cp_len;
5784 					freed_so_far += cp_len;
5785 					freed_so_far += MSIZE;
5786 					atomic_subtract_int(&control->length, cp_len);
5787 					control->data = sctp_m_free(m);
5788 					m = control->data;
5789 					/*
5790 					 * been through it all, must hold sb
5791 					 * lock ok to null tail
5792 					 */
5793 					if (control->data == NULL) {
5794 #ifdef INVARIANTS
5795 						if ((control->end_added == 0) ||
5796 						    (TAILQ_NEXT(control, next) == NULL)) {
5797 							/*
5798 							 * If the end is not
5799 							 * added, OR the
5800 							 * next is NOT null
5801 							 * we MUST have the
5802 							 * lock.
5803 							 */
5804 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5805 								panic("Hmm we don't own the lock?");
5806 							}
5807 						}
5808 #endif
5809 						control->tail_mbuf = NULL;
5810 #ifdef INVARIANTS
5811 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5812 							panic("end_added, nothing left and no MSG_EOR");
5813 						}
5814 #endif
5815 					}
5816 				}
5817 			} else {
5818 				/* Do we need to trim the mbuf? */
5819 				if (control->spec_flags & M_NOTIFICATION) {
5820 					out_flags |= MSG_NOTIFICATION;
5821 				}
5822 				if ((in_flags & MSG_PEEK) == 0) {
5823 					SCTP_BUF_RESV_UF(m, cp_len);
5824 					SCTP_BUF_LEN(m) -= cp_len;
5825 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5826 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5827 					}
5828 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5829 					if ((control->do_not_ref_stcb == 0) &&
5830 					    stcb) {
5831 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5832 					}
5833 					copied_so_far += cp_len;
5834 					freed_so_far += cp_len;
5835 					freed_so_far += MSIZE;
5836 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5837 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5838 						    SCTP_LOG_SBRESULT, 0);
5839 					}
5840 					atomic_subtract_int(&control->length, cp_len);
5841 				} else {
5842 					copied_so_far += cp_len;
5843 				}
5844 			}
5845 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5846 				break;
5847 			}
5848 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5849 			    (control->do_not_ref_stcb == 0) &&
5850 			    (freed_so_far >= rwnd_req)) {
5851 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5852 			}
5853 		}		/* end while(m) */
5854 		/*
5855 		 * At this point we have looked at it all and we either have
5856 		 * a MSG_EOR/or read all the user wants... <OR>
5857 		 * control->length == 0.
5858 		 */
5859 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5860 			/* we are done with this control */
5861 			if (control->length == 0) {
5862 				if (control->data) {
5863 #ifdef INVARIANTS
5864 					panic("control->data not null at read eor?");
5865 #else
5866 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5867 					sctp_m_freem(control->data);
5868 					control->data = NULL;
5869 #endif
5870 				}
5871 		done_with_control:
5872 				if (TAILQ_NEXT(control, next) == NULL) {
5873 					/*
5874 					 * If we don't have a next we need a
5875 					 * lock, if there is a next
5876 					 * interrupt is filling ahead of us
5877 					 * and we don't need a lock to
5878 					 * remove this guy (which is the
5879 					 * head of the queue).
5880 					 */
5881 					if (hold_rlock == 0) {
5882 						SCTP_INP_READ_LOCK(inp);
5883 						hold_rlock = 1;
5884 					}
5885 				}
5886 				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hidden data */
5888 				if (control->held_length) {
5889 					held_length = 0;
5890 					control->held_length = 0;
5891 					wakeup_read_socket = 1;
5892 				}
5893 				if (control->aux_data) {
5894 					sctp_m_free(control->aux_data);
5895 					control->aux_data = NULL;
5896 				}
5897 				no_rcv_needed = control->do_not_ref_stcb;
5898 				sctp_free_remote_addr(control->whoFrom);
5899 				control->data = NULL;
5900 				sctp_free_a_readq(stcb, control);
5901 				control = NULL;
5902 				if ((freed_so_far >= rwnd_req) &&
5903 				    (no_rcv_needed == 0))
5904 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5905 
5906 			} else {
5907 				/*
5908 				 * The user did not read all of this
5909 				 * message, turn off the returned MSG_EOR
5910 				 * since we are leaving more behind on the
5911 				 * control to read.
5912 				 */
5913 #ifdef INVARIANTS
5914 				if (control->end_added &&
5915 				    (control->data == NULL) &&
5916 				    (control->tail_mbuf == NULL)) {
5917 					panic("Gak, control->length is corrupt?");
5918 				}
5919 #endif
5920 				no_rcv_needed = control->do_not_ref_stcb;
5921 				out_flags &= ~MSG_EOR;
5922 			}
5923 		}
5924 		if (out_flags & MSG_EOR) {
5925 			goto release;
5926 		}
5927 		if ((uio->uio_resid == 0) ||
5928 		    ((in_eeor_mode) &&
5929 		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5930 			goto release;
5931 		}
5932 		/*
5933 		 * If I hit here the receiver wants more and this message is
5934 		 * NOT done (pd-api). So two questions. Can we block? if not
5935 		 * we are done. Did the user NOT set MSG_WAITALL?
5936 		 */
5937 		if (block_allowed == 0) {
5938 			goto release;
5939 		}
5940 		/*
5941 		 * We need to wait for more data a few things: - We don't
5942 		 * sbunlock() so we don't get someone else reading. - We
5943 		 * must be sure to account for the case where what is added
5944 		 * is NOT to our control when we wakeup.
5945 		 */
5946 
5947 		/*
5948 		 * Do we need to tell the transport a rwnd update might be
5949 		 * needed before we go to sleep?
5950 		 */
5951 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5952 		    ((freed_so_far >= rwnd_req) &&
5953 		    (control->do_not_ref_stcb == 0) &&
5954 		    (no_rcv_needed == 0))) {
5955 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5956 		}
5957 wait_some_more:
5958 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5959 			goto release;
5960 		}
5961 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5962 			goto release;
5963 
5964 		if (hold_rlock == 1) {
5965 			SCTP_INP_READ_UNLOCK(inp);
5966 			hold_rlock = 0;
5967 		}
5968 		if (hold_sblock == 0) {
5969 			SOCKBUF_LOCK(&so->so_rcv);
5970 			hold_sblock = 1;
5971 		}
5972 		if ((copied_so_far) && (control->length == 0) &&
5973 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5974 			goto release;
5975 		}
5976 		if (so->so_rcv.sb_cc <= control->held_length) {
5977 			error = sbwait(&so->so_rcv);
5978 			if (error) {
5979 				goto release;
5980 			}
5981 			control->held_length = 0;
5982 		}
5983 		if (hold_sblock) {
5984 			SOCKBUF_UNLOCK(&so->so_rcv);
5985 			hold_sblock = 0;
5986 		}
5987 		if (control->length == 0) {
5988 			/* still nothing here */
5989 			if (control->end_added == 1) {
5990 				/* he aborted, or is done i.e.did a shutdown */
5991 				out_flags |= MSG_EOR;
5992 				if (control->pdapi_aborted) {
5993 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5994 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5995 
5996 					out_flags |= MSG_TRUNC;
5997 				} else {
5998 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5999 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6000 				}
6001 				goto done_with_control;
6002 			}
6003 			if (so->so_rcv.sb_cc > held_length) {
6004 				control->held_length = so->so_rcv.sb_cc;
6005 				held_length = 0;
6006 			}
6007 			goto wait_some_more;
6008 		} else if (control->data == NULL) {
6009 			/*
6010 			 * we must re-sync since data is probably being
6011 			 * added
6012 			 */
6013 			SCTP_INP_READ_LOCK(inp);
6014 			if ((control->length > 0) && (control->data == NULL)) {
6015 				/*
6016 				 * big trouble.. we have the lock and its
6017 				 * corrupt?
6018 				 */
6019 #ifdef INVARIANTS
6020 				panic("Impossible data==NULL length !=0");
6021 #endif
6022 				out_flags |= MSG_EOR;
6023 				out_flags |= MSG_TRUNC;
6024 				control->length = 0;
6025 				SCTP_INP_READ_UNLOCK(inp);
6026 				goto done_with_control;
6027 			}
6028 			SCTP_INP_READ_UNLOCK(inp);
6029 			/* We will fall around to get more data */
6030 		}
6031 		goto get_more_data;
6032 	} else {
6033 		/*-
6034 		 * Give caller back the mbuf chain,
6035 		 * store in uio_resid the length
6036 		 */
6037 		wakeup_read_socket = 0;
6038 		if ((control->end_added == 0) ||
6039 		    (TAILQ_NEXT(control, next) == NULL)) {
6040 			/* Need to get rlock */
6041 			if (hold_rlock == 0) {
6042 				SCTP_INP_READ_LOCK(inp);
6043 				hold_rlock = 1;
6044 			}
6045 		}
6046 		if (control->end_added) {
6047 			out_flags |= MSG_EOR;
6048 			if ((control->do_not_ref_stcb == 0) &&
6049 			    (control->stcb != NULL) &&
6050 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6051 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6052 		}
6053 		if (control->spec_flags & M_NOTIFICATION) {
6054 			out_flags |= MSG_NOTIFICATION;
6055 		}
6056 		uio->uio_resid = control->length;
6057 		*mp = control->data;
6058 		m = control->data;
6059 		while (m) {
6060 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6061 				sctp_sblog(&so->so_rcv,
6062 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6063 			}
6064 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6065 			freed_so_far += SCTP_BUF_LEN(m);
6066 			freed_so_far += MSIZE;
6067 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6068 				sctp_sblog(&so->so_rcv,
6069 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6070 			}
6071 			m = SCTP_BUF_NEXT(m);
6072 		}
6073 		control->data = control->tail_mbuf = NULL;
6074 		control->length = 0;
6075 		if (out_flags & MSG_EOR) {
6076 			/* Done with this control */
6077 			goto done_with_control;
6078 		}
6079 	}
6080 release:
6081 	if (hold_rlock == 1) {
6082 		SCTP_INP_READ_UNLOCK(inp);
6083 		hold_rlock = 0;
6084 	}
6085 	if (hold_sblock == 1) {
6086 		SOCKBUF_UNLOCK(&so->so_rcv);
6087 		hold_sblock = 0;
6088 	}
6089 	sbunlock(&so->so_rcv);
6090 	sockbuf_lock = 0;
6091 
6092 release_unlocked:
6093 	if (hold_sblock) {
6094 		SOCKBUF_UNLOCK(&so->so_rcv);
6095 		hold_sblock = 0;
6096 	}
6097 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6098 		if ((freed_so_far >= rwnd_req) &&
6099 		    (control && (control->do_not_ref_stcb == 0)) &&
6100 		    (no_rcv_needed == 0))
6101 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6102 	}
6103 out:
6104 	if (msg_flags) {
6105 		*msg_flags = out_flags;
6106 	}
6107 	if (((out_flags & MSG_EOR) == 0) &&
6108 	    ((in_flags & MSG_PEEK) == 0) &&
6109 	    (sinfo) &&
6110 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6111 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6112 		struct sctp_extrcvinfo *s_extra;
6113 
6114 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6115 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6116 	}
6117 	if (hold_rlock == 1) {
6118 		SCTP_INP_READ_UNLOCK(inp);
6119 	}
6120 	if (hold_sblock) {
6121 		SOCKBUF_UNLOCK(&so->so_rcv);
6122 	}
6123 	if (sockbuf_lock) {
6124 		sbunlock(&so->so_rcv);
6125 	}
6126 	if (freecnt_applied) {
6127 		/*
6128 		 * The lock on the socket buffer protects us so the free
6129 		 * code will stop. But since we used the socketbuf lock and
6130 		 * the sender uses the tcb_lock to increment, we need to use
6131 		 * the atomic add to the refcnt.
6132 		 */
6133 		if (stcb == NULL) {
6134 #ifdef INVARIANTS
6135 			panic("stcb for refcnt has gone NULL?");
6136 			goto stage_left;
6137 #else
6138 			goto stage_left;
6139 #endif
6140 		}
6141 		atomic_add_int(&stcb->asoc.refcnt, -1);
6142 		/* Save the value back for next time */
6143 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6144 	}
6145 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6146 		if (stcb) {
6147 			sctp_misc_ints(SCTP_SORECV_DONE,
6148 			    freed_so_far,
6149 			    ((uio) ? (slen - uio->uio_resid) : slen),
6150 			    stcb->asoc.my_rwnd,
6151 			    so->so_rcv.sb_cc);
6152 		} else {
6153 			sctp_misc_ints(SCTP_SORECV_DONE,
6154 			    freed_so_far,
6155 			    ((uio) ? (slen - uio->uio_resid) : slen),
6156 			    0,
6157 			    so->so_rcv.sb_cc);
6158 		}
6159 	}
6160 stage_left:
6161 	if (wakeup_read_socket) {
6162 		sctp_sorwakeup(inp, so);
6163 	}
6164 	return (error);
6165 }
6166 
6167 
6168 #ifdef SCTP_MBUF_LOGGING
struct mbuf *
sctp_m_free(struct mbuf *m)
{
	/*
	 * Logging wrapper around m_free(): when SCTP mbuf logging is
	 * enabled via the sysctl logging level, record this mbuf in the
	 * trace log before freeing it.  Like m_free(), returns the next
	 * mbuf in the chain (or NULL).
	 */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mb(m, SCTP_MBUF_IFREE);
	}
	return (m_free(m));
}
6177 
6178 void
6179 sctp_m_freem(struct mbuf *mb)
6180 {
6181 	while (mb != NULL)
6182 		mb = sctp_m_free(mb);
6183 }
6184 
6185 #endif
6186 
6187 int
6188 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6189 {
6190 	/*
6191 	 * Given a local address. For all associations that holds the
6192 	 * address, request a peer-set-primary.
6193 	 */
6194 	struct sctp_ifa *ifa;
6195 	struct sctp_laddr *wi;
6196 
6197 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6198 	if (ifa == NULL) {
6199 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6200 		return (EADDRNOTAVAIL);
6201 	}
6202 	/*
6203 	 * Now that we have the ifa we must awaken the iterator with this
6204 	 * message.
6205 	 */
6206 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6207 	if (wi == NULL) {
6208 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6209 		return (ENOMEM);
6210 	}
6211 	/* Now incr the count and int wi structure */
6212 	SCTP_INCR_LADDR_COUNT();
6213 	bzero(wi, sizeof(*wi));
6214 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6215 	wi->ifa = ifa;
6216 	wi->action = SCTP_SET_PRIM_ADDR;
6217 	atomic_add_int(&ifa->refcount, 1);
6218 
6219 	/* Now add it to the work queue */
6220 	SCTP_WQ_ADDR_LOCK();
6221 	/*
6222 	 * Should this really be a tailq? As it is we will process the
6223 	 * newest first :-0
6224 	 */
6225 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6226 	SCTP_WQ_ADDR_UNLOCK();
6227 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6228 	    (struct sctp_inpcb *)NULL,
6229 	    (struct sctp_tcb *)NULL,
6230 	    (struct sctp_nets *)NULL);
6231 	return (0);
6232 }
6233 
6234 
6235 int
6236 sctp_soreceive(struct socket *so,
6237     struct sockaddr **psa,
6238     struct uio *uio,
6239     struct mbuf **mp0,
6240     struct mbuf **controlp,
6241     int *flagsp)
6242 {
6243 	int error, fromlen;
6244 	uint8_t sockbuf[256];
6245 	struct sockaddr *from;
6246 	struct sctp_extrcvinfo sinfo;
6247 	int filling_sinfo = 1;
6248 	struct sctp_inpcb *inp;
6249 
6250 	inp = (struct sctp_inpcb *)so->so_pcb;
6251 	/* pickup the assoc we are reading from */
6252 	if (inp == NULL) {
6253 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6254 		return (EINVAL);
6255 	}
6256 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6257 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6258 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6259 	    (controlp == NULL)) {
6260 		/* user does not want the sndrcv ctl */
6261 		filling_sinfo = 0;
6262 	}
6263 	if (psa) {
6264 		from = (struct sockaddr *)sockbuf;
6265 		fromlen = sizeof(sockbuf);
6266 		from->sa_len = 0;
6267 	} else {
6268 		from = NULL;
6269 		fromlen = 0;
6270 	}
6271 
6272 	if (filling_sinfo) {
6273 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6274 	}
6275 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6276 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6277 	if (controlp != NULL) {
6278 		/* copy back the sinfo in a CMSG format */
6279 		if (filling_sinfo)
6280 			*controlp = sctp_build_ctl_nchunk(inp,
6281 			    (struct sctp_sndrcvinfo *)&sinfo);
6282 		else
6283 			*controlp = NULL;
6284 	}
6285 	if (psa) {
6286 		/* copy back the address info */
6287 		if (from && from->sa_len) {
6288 			*psa = sodupsockaddr(from, M_NOWAIT);
6289 		} else {
6290 			*psa = NULL;
6291 		}
6292 	}
6293 	return (error);
6294 }
6295 
6296 
6297 
6298 
6299 
/*
 * Add each address in the packed sockaddr array 'addr' (totaddr entries)
 * as a remote address of 'stcb'.  Returns the number of addresses added.
 * On any failure the association is FREED via sctp_free_assoc() and
 * *error is set (EINVAL for an unusable address, ENOBUFS if
 * sctp_add_remote_addr() fails) -- the caller must not touch stcb when
 * *error is non-zero.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject wildcard, broadcast and multicast peers. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast peers. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): for an unknown family 'incr' keeps
			 * its previous value (0 on the first iteration), so
			 * 'sa' is not advanced past the entry -- the loop
			 * then re-reads the same bytes for the remaining
			 * iterations.  Presumably callers pre-validate the
			 * families (sctp_connectx_helper_find truncates
			 * *totaddr at the first unknown one) -- confirm.
			 */
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6379 
6380 struct sctp_tcb *
6381 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6382     int *totaddr, int *num_v4, int *num_v6, int *error,
6383     int limit, int *bad_addr)
6384 {
6385 	struct sockaddr *sa;
6386 	struct sctp_tcb *stcb = NULL;
6387 	size_t incr, at, i;
6388 
6389 	at = incr = 0;
6390 	sa = addr;
6391 
6392 	*error = *num_v6 = *num_v4 = 0;
6393 	/* account and validate addresses */
6394 	for (i = 0; i < (size_t)*totaddr; i++) {
6395 		switch (sa->sa_family) {
6396 #ifdef INET
6397 		case AF_INET:
6398 			(*num_v4) += 1;
6399 			incr = sizeof(struct sockaddr_in);
6400 			if (sa->sa_len != incr) {
6401 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6402 				*error = EINVAL;
6403 				*bad_addr = 1;
6404 				return (NULL);
6405 			}
6406 			break;
6407 #endif
6408 #ifdef INET6
6409 		case AF_INET6:
6410 			{
6411 				struct sockaddr_in6 *sin6;
6412 
6413 				sin6 = (struct sockaddr_in6 *)sa;
6414 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6415 					/* Must be non-mapped for connectx */
6416 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6417 					*error = EINVAL;
6418 					*bad_addr = 1;
6419 					return (NULL);
6420 				}
6421 				(*num_v6) += 1;
6422 				incr = sizeof(struct sockaddr_in6);
6423 				if (sa->sa_len != incr) {
6424 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6425 					*error = EINVAL;
6426 					*bad_addr = 1;
6427 					return (NULL);
6428 				}
6429 				break;
6430 			}
6431 #endif
6432 		default:
6433 			*totaddr = i;
6434 			/* we are done */
6435 			break;
6436 		}
6437 		if (i == (size_t)*totaddr) {
6438 			break;
6439 		}
6440 		SCTP_INP_INCR_REF(inp);
6441 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6442 		if (stcb != NULL) {
6443 			/* Already have or am bring up an association */
6444 			return (stcb);
6445 		} else {
6446 			SCTP_INP_DECR_REF(inp);
6447 		}
6448 		if ((at + incr) > (size_t)limit) {
6449 			*totaddr = i;
6450 			break;
6451 		}
6452 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6453 	}
6454 	return ((struct sctp_tcb *)NULL);
6455 }
6456 
6457 /*
6458  * sctp_bindx(ADD) for one address.
6459  * assumes all arguments are valid/checked by caller.
6460  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/*
	 * Add one local address to the endpoint.  On success *error stays 0;
	 * on failure *error is set (EINVAL for a bad address/endpoint state,
	 * EADDRINUSE if another endpoint owns the address, or whatever
	 * sctp_inpcb_bind()/sctp_addr_mgmt_ep_sa() return).  'p' is the
	 * calling thread/proc, needed only when the endpoint is still
	 * unbound.  Association-scoped bindx (assoc_id != 0) is currently
	 * a no-op (see FIX below).
	 */
	struct sockaddr *addr_touse;

#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Work with the plain v4 form from here on. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* First address on an unbound endpoint: a regular bind. */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		/*
		 * NOTE(review): addr_touse may be v6 here; the
		 * sockaddr_in cast is used only to reach sin_port,
		 * presumably relying on sin_port and sin6_port sharing
		 * the same offset -- confirm against the sockaddr
		 * layouts.
		 */
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Address is free: add it to this endpoint. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* Some other endpoint owns it. */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6588 
6589 /*
6590  * sctp_bindx(DELETE) for one address.
6591  * assumes all arguments are valid/checked by caller.
6592  */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/*
	 * Remove one local address from the endpoint.  Mirrors
	 * sctp_bindx_add_address(): validates family/length against the
	 * endpoint's binding mode, converts a v4-mapped v6 address to
	 * plain v4 where allowed, then asks sctp_addr_mgmt_ep_sa() to
	 * delete the address.  *error is set to EINVAL on validation
	 * failure, otherwise to sctp_addr_mgmt_ep_sa()'s result.
	 * Association-scoped bindx (assoc_id != 0) is currently a no-op.
	 */
	struct sockaddr *addr_touse;

#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Work with the plain v4 form from here on. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6678 
6679 /*
6680  * returns the valid local address count for an assoc, taking into account
6681  * all scoping rules
6682  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Count the local addresses usable by this association, applying
	 * the association's address scoping rules (loopback, IPv4
	 * private/local, IPv6 link/site scope) and jail (prison)
	 * restrictions.  For a bound-all endpoint every interface address
	 * in the VRF is considered; otherwise only the endpoint's
	 * explicitly bound address list is counted.
	 */
	int loopback_scope;

#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;

#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;

#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							/* not visible in this jail */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							/* not visible in this jail */
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6828 
6829 #if defined(SCTP_LOCAL_TRACE_BUF)
6830 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	/*
	 * Record one entry in the global SCTP trace ring buffer.  A slot
	 * is claimed with an atomic compare-and-swap loop so concurrent
	 * callers never claim the same index; the entry fields themselves
	 * are filled in without a lock, so a reader racing with a writer
	 * may see a partially updated entry (acceptable for a debug-only
	 * facility).  'str' is accepted for API compatibility but unused.
	 */
	uint32_t saveindex, newindex;

	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* Wrap: an index at/past the end means this write lands in slot 0. */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6856 
6857 #endif
/*
 * Input callback for SCTP-over-UDP tunneling: strip the UDP header from a
 * tunneled packet at IP-header offset 'off' and hand the result to the
 * normal SCTP input path, remembering the UDP source port so replies can
 * be tunneled back.  The mbuf is consumed on every path (passed on or
 * freed).
 *
 * NOTE(review): reading uhdr via (caddr_t)iph + off assumes the IP and UDP
 * headers are contiguous in the first mbuf -- presumably guaranteed by the
 * UDP tunneling input path before this callback runs; confirm.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	struct ip *iph;

#ifdef INET6
	struct ip6_hdr *ip6;

#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* Shrink the IP length to account for the removed UDP header. */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		/* Unknown IP version: drop. */
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6930 
void
sctp_over_udp_stop(void)
{
	/*
	 * Tear down the UDP tunneling socket(s), if running.  The sysctl
	 * caller is assumed to hold sctp_sysctl_info_lock() for writing!
	 */
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		struct socket *so4;

		so4 = SCTP_BASE_INFO(udp4_tun_socket);
		soclose(so4);
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		struct socket *so6;

		so6 = SCTP_BASE_INFO(udp6_tun_socket);
		soclose(so6);
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
6951 
6952 int
6953 sctp_over_udp_start(void)
6954 {
6955 	uint16_t port;
6956 	int ret;
6957 
6958 #ifdef INET
6959 	struct sockaddr_in sin;
6960 
6961 #endif
6962 #ifdef INET6
6963 	struct sockaddr_in6 sin6;
6964 
6965 #endif
6966 	/*
6967 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6968 	 * for writting!
6969 	 */
6970 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
6971 	if (ntohs(port) == 0) {
6972 		/* Must have a port set */
6973 		return (EINVAL);
6974 	}
6975 #ifdef INET
6976 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
6977 		/* Already running -- must stop first */
6978 		return (EALREADY);
6979 	}
6980 #endif
6981 #ifdef INET6
6982 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
6983 		/* Already running -- must stop first */
6984 		return (EALREADY);
6985 	}
6986 #endif
6987 #ifdef INET
6988 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
6989 	    SOCK_DGRAM, IPPROTO_UDP,
6990 	    curthread->td_ucred, curthread))) {
6991 		sctp_over_udp_stop();
6992 		return (ret);
6993 	}
6994 	/* Call the special UDP hook. */
6995 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
6996 	    sctp_recv_udp_tunneled_packet, NULL))) {
6997 		sctp_over_udp_stop();
6998 		return (ret);
6999 	}
7000 	/* Ok, we have a socket, bind it to the port. */
7001 	memset(&sin, 0, sizeof(struct sockaddr_in));
7002 	sin.sin_len = sizeof(struct sockaddr_in);
7003 	sin.sin_family = AF_INET;
7004 	sin.sin_port = htons(port);
7005 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7006 	    (struct sockaddr *)&sin, curthread))) {
7007 		sctp_over_udp_stop();
7008 		return (ret);
7009 	}
7010 #endif
7011 #ifdef INET6
7012 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7013 	    SOCK_DGRAM, IPPROTO_UDP,
7014 	    curthread->td_ucred, curthread))) {
7015 		sctp_over_udp_stop();
7016 		return (ret);
7017 	}
7018 	/* Call the special UDP hook. */
7019 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7020 	    sctp_recv_udp_tunneled_packet, NULL))) {
7021 		sctp_over_udp_stop();
7022 		return (ret);
7023 	}
7024 	/* Ok, we have a socket, bind it to the port. */
7025 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7026 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7027 	sin6.sin6_family = AF_INET6;
7028 	sin6.sin6_port = htons(port);
7029 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7030 	    (struct sockaddr *)&sin6, curthread))) {
7031 		sctp_over_udp_stop();
7032 		return (ret);
7033 	}
7034 #endif
7035 	return (0);
7036 }
7037