xref: /freebsd/sys/netinet/sctputil.c (revision 3b8f08459569bf0faa21473e5cec2491e95c9349)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55 
56 
57 #ifndef KTR_SCTP
58 #define KTR_SCTP KTR_SUBSYS
59 #endif
60 
61 extern struct sctp_cc_functions sctp_cc_functions[];
62 extern struct sctp_ss_functions sctp_ss_functions[];
63 
64 void
65 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
66 {
67 	struct sctp_cwnd_log sctp_clog;
68 
69 	sctp_clog.x.sb.stcb = stcb;
70 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
71 	if (stcb)
72 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
73 	else
74 		sctp_clog.x.sb.stcb_sbcc = 0;
75 	sctp_clog.x.sb.incr = incr;
76 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
77 	    SCTP_LOG_EVENT_SB,
78 	    from,
79 	    sctp_clog.x.misc.log1,
80 	    sctp_clog.x.misc.log2,
81 	    sctp_clog.x.misc.log3,
82 	    sctp_clog.x.misc.log4);
83 }
84 
85 void
86 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
87 {
88 	struct sctp_cwnd_log sctp_clog;
89 
90 	sctp_clog.x.close.inp = (void *)inp;
91 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
92 	if (stcb) {
93 		sctp_clog.x.close.stcb = (void *)stcb;
94 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
95 	} else {
96 		sctp_clog.x.close.stcb = 0;
97 		sctp_clog.x.close.state = 0;
98 	}
99 	sctp_clog.x.close.loc = loc;
100 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
101 	    SCTP_LOG_EVENT_CLOSE,
102 	    0,
103 	    sctp_clog.x.misc.log1,
104 	    sctp_clog.x.misc.log2,
105 	    sctp_clog.x.misc.log3,
106 	    sctp_clog.x.misc.log4);
107 }
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 }
125 
126 void
127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128 {
129 	struct sctp_cwnd_log sctp_clog;
130 
131 	sctp_clog.x.strlog.stcb = stcb;
132 	sctp_clog.x.strlog.n_tsn = tsn;
133 	sctp_clog.x.strlog.n_sseq = sseq;
134 	sctp_clog.x.strlog.e_tsn = 0;
135 	sctp_clog.x.strlog.e_sseq = 0;
136 	sctp_clog.x.strlog.strm = stream;
137 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138 	    SCTP_LOG_EVENT_STRM,
139 	    from,
140 	    sctp_clog.x.misc.log1,
141 	    sctp_clog.x.misc.log2,
142 	    sctp_clog.x.misc.log3,
143 	    sctp_clog.x.misc.log4);
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 void
166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167 {
168 	struct sctp_cwnd_log sctp_clog;
169 
170 	sctp_clog.x.sack.cumack = cumack;
171 	sctp_clog.x.sack.oldcumack = old_cumack;
172 	sctp_clog.x.sack.tsn = tsn;
173 	sctp_clog.x.sack.numGaps = gaps;
174 	sctp_clog.x.sack.numDups = dups;
175 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176 	    SCTP_LOG_EVENT_SACK,
177 	    from,
178 	    sctp_clog.x.misc.log1,
179 	    sctp_clog.x.misc.log2,
180 	    sctp_clog.x.misc.log3,
181 	    sctp_clog.x.misc.log4);
182 }
183 
184 void
185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186 {
187 	struct sctp_cwnd_log sctp_clog;
188 
189 	memset(&sctp_clog, 0, sizeof(sctp_clog));
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
204 {
205 	struct sctp_cwnd_log sctp_clog;
206 
207 	memset(&sctp_clog, 0, sizeof(sctp_clog));
208 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210 	sctp_clog.x.fr.tsn = tsn;
211 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212 	    SCTP_LOG_EVENT_FR,
213 	    from,
214 	    sctp_clog.x.misc.log1,
215 	    sctp_clog.x.misc.log2,
216 	    sctp_clog.x.misc.log3,
217 	    sctp_clog.x.misc.log4);
218 }
219 
220 void
221 sctp_log_mb(struct mbuf *m, int from)
222 {
223 	struct sctp_cwnd_log sctp_clog;
224 
225 	sctp_clog.x.mb.mp = m;
226 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
227 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
228 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
229 	if (SCTP_BUF_IS_EXTENDED(m)) {
230 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
231 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
232 	} else {
233 		sctp_clog.x.mb.ext = 0;
234 		sctp_clog.x.mb.refcnt = 0;
235 	}
236 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
237 	    SCTP_LOG_EVENT_MBUF,
238 	    from,
239 	    sctp_clog.x.misc.log1,
240 	    sctp_clog.x.misc.log2,
241 	    sctp_clog.x.misc.log3,
242 	    sctp_clog.x.misc.log4);
243 }
244 
245 void
246 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
247 {
248 	struct sctp_cwnd_log sctp_clog;
249 
250 	if (control == NULL) {
251 		SCTP_PRINTF("Gak log of NULL?\n");
252 		return;
253 	}
254 	sctp_clog.x.strlog.stcb = control->stcb;
255 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
256 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
257 	sctp_clog.x.strlog.strm = control->sinfo_stream;
258 	if (poschk != NULL) {
259 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
260 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
261 	} else {
262 		sctp_clog.x.strlog.e_tsn = 0;
263 		sctp_clog.x.strlog.e_sseq = 0;
264 	}
265 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
266 	    SCTP_LOG_EVENT_STRM,
267 	    from,
268 	    sctp_clog.x.misc.log1,
269 	    sctp_clog.x.misc.log2,
270 	    sctp_clog.x.misc.log3,
271 	    sctp_clog.x.misc.log4);
272 }
273 
274 void
275 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
276 {
277 	struct sctp_cwnd_log sctp_clog;
278 
279 	sctp_clog.x.cwnd.net = net;
280 	if (stcb->asoc.send_queue_cnt > 255)
281 		sctp_clog.x.cwnd.cnt_in_send = 255;
282 	else
283 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
284 	if (stcb->asoc.stream_queue_cnt > 255)
285 		sctp_clog.x.cwnd.cnt_in_str = 255;
286 	else
287 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
288 
289 	if (net) {
290 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
291 		sctp_clog.x.cwnd.inflight = net->flight_size;
292 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
293 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
294 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
295 	}
296 	if (SCTP_CWNDLOG_PRESEND == from) {
297 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
298 	}
299 	sctp_clog.x.cwnd.cwnd_augment = augment;
300 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
301 	    SCTP_LOG_EVENT_CWND,
302 	    from,
303 	    sctp_clog.x.misc.log1,
304 	    sctp_clog.x.misc.log2,
305 	    sctp_clog.x.misc.log3,
306 	    sctp_clog.x.misc.log4);
307 }
308 
309 void
310 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
311 {
312 	struct sctp_cwnd_log sctp_clog;
313 
314 	memset(&sctp_clog, 0, sizeof(sctp_clog));
315 	if (inp) {
316 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
317 
318 	} else {
319 		sctp_clog.x.lock.sock = (void *)NULL;
320 	}
321 	sctp_clog.x.lock.inp = (void *)inp;
322 	if (stcb) {
323 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
324 	} else {
325 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
326 	}
327 	if (inp) {
328 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
329 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
330 	} else {
331 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
332 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
333 	}
334 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
335 	if (inp && (inp->sctp_socket)) {
336 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
337 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
338 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
339 	} else {
340 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
341 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
342 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
343 	}
344 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
345 	    SCTP_LOG_LOCK_EVENT,
346 	    from,
347 	    sctp_clog.x.misc.log1,
348 	    sctp_clog.x.misc.log2,
349 	    sctp_clog.x.misc.log3,
350 	    sctp_clog.x.misc.log4);
351 }
352 
353 void
354 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
355 {
356 	struct sctp_cwnd_log sctp_clog;
357 
358 	memset(&sctp_clog, 0, sizeof(sctp_clog));
359 	sctp_clog.x.cwnd.net = net;
360 	sctp_clog.x.cwnd.cwnd_new_value = error;
361 	sctp_clog.x.cwnd.inflight = net->flight_size;
362 	sctp_clog.x.cwnd.cwnd_augment = burst;
363 	if (stcb->asoc.send_queue_cnt > 255)
364 		sctp_clog.x.cwnd.cnt_in_send = 255;
365 	else
366 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
367 	if (stcb->asoc.stream_queue_cnt > 255)
368 		sctp_clog.x.cwnd.cnt_in_str = 255;
369 	else
370 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
371 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
372 	    SCTP_LOG_EVENT_MAXBURST,
373 	    from,
374 	    sctp_clog.x.misc.log1,
375 	    sctp_clog.x.misc.log2,
376 	    sctp_clog.x.misc.log3,
377 	    sctp_clog.x.misc.log4);
378 }
379 
380 void
381 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
382 {
383 	struct sctp_cwnd_log sctp_clog;
384 
385 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
386 	sctp_clog.x.rwnd.send_size = snd_size;
387 	sctp_clog.x.rwnd.overhead = overhead;
388 	sctp_clog.x.rwnd.new_rwnd = 0;
389 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
390 	    SCTP_LOG_EVENT_RWND,
391 	    from,
392 	    sctp_clog.x.misc.log1,
393 	    sctp_clog.x.misc.log2,
394 	    sctp_clog.x.misc.log3,
395 	    sctp_clog.x.misc.log4);
396 }
397 
398 void
399 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
400 {
401 	struct sctp_cwnd_log sctp_clog;
402 
403 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
404 	sctp_clog.x.rwnd.send_size = flight_size;
405 	sctp_clog.x.rwnd.overhead = overhead;
406 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
407 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
408 	    SCTP_LOG_EVENT_RWND,
409 	    from,
410 	    sctp_clog.x.misc.log1,
411 	    sctp_clog.x.misc.log2,
412 	    sctp_clog.x.misc.log3,
413 	    sctp_clog.x.misc.log4);
414 }
415 
416 void
417 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
418 {
419 	struct sctp_cwnd_log sctp_clog;
420 
421 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
422 	sctp_clog.x.mbcnt.size_change = book;
423 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
424 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
425 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
426 	    SCTP_LOG_EVENT_MBCNT,
427 	    from,
428 	    sctp_clog.x.misc.log1,
429 	    sctp_clog.x.misc.log2,
430 	    sctp_clog.x.misc.log3,
431 	    sctp_clog.x.misc.log4);
432 }
433 
/*
 * Emit four caller-supplied 32-bit values straight to the KTR trace as a
 * SCTP_LOG_MISC_EVENT record; 'from' tags the call site.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
442 
443 void
444 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
445 {
446 	struct sctp_cwnd_log sctp_clog;
447 
448 	sctp_clog.x.wake.stcb = (void *)stcb;
449 	sctp_clog.x.wake.wake_cnt = wake_cnt;
450 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
451 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
452 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
453 
454 	if (stcb->asoc.stream_queue_cnt < 0xff)
455 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
456 	else
457 		sctp_clog.x.wake.stream_qcnt = 0xff;
458 
459 	if (stcb->asoc.chunks_on_out_queue < 0xff)
460 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
461 	else
462 		sctp_clog.x.wake.chunks_on_oque = 0xff;
463 
464 	sctp_clog.x.wake.sctpflags = 0;
465 	/* set in the defered mode stuff */
466 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
467 		sctp_clog.x.wake.sctpflags |= 1;
468 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
469 		sctp_clog.x.wake.sctpflags |= 2;
470 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
471 		sctp_clog.x.wake.sctpflags |= 4;
472 	/* what about the sb */
473 	if (stcb->sctp_socket) {
474 		struct socket *so = stcb->sctp_socket;
475 
476 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
477 	} else {
478 		sctp_clog.x.wake.sbflags = 0xff;
479 	}
480 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
481 	    SCTP_LOG_EVENT_WAKE,
482 	    from,
483 	    sctp_clog.x.misc.log1,
484 	    sctp_clog.x.misc.log2,
485 	    sctp_clog.x.misc.log3,
486 	    sctp_clog.x.misc.log4);
487 }
488 
489 void
490 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
491 {
492 	struct sctp_cwnd_log sctp_clog;
493 
494 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
495 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
496 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
497 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
498 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
499 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
500 	sctp_clog.x.blk.sndlen = sendlen;
501 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
502 	    SCTP_LOG_EVENT_BLOCK,
503 	    from,
504 	    sctp_clog.x.misc.log1,
505 	    sctp_clog.x.misc.log2,
506 	    sctp_clog.x.misc.log3,
507 	    sctp_clog.x.misc.log4);
508 }
509 
/*
 * Stub for the getsockopt stat-log fetch; KTR dumps (ktrdump) are used
 * instead, so this always succeeds without copying anything out.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
516 
517 #ifdef SCTP_AUDITING_ENABLED
518 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
519 static int sctp_audit_indx = 0;
520 
521 static
522 void
523 sctp_print_audit_report(void)
524 {
525 	int i;
526 	int cnt;
527 
528 	cnt = 0;
529 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
530 		if ((sctp_audit_data[i][0] == 0xe0) &&
531 		    (sctp_audit_data[i][1] == 0x01)) {
532 			cnt = 0;
533 			SCTP_PRINTF("\n");
534 		} else if (sctp_audit_data[i][0] == 0xf0) {
535 			cnt = 0;
536 			SCTP_PRINTF("\n");
537 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
538 		    (sctp_audit_data[i][1] == 0x01)) {
539 			SCTP_PRINTF("\n");
540 			cnt = 0;
541 		}
542 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
543 		    (uint32_t) sctp_audit_data[i][1]);
544 		cnt++;
545 		if ((cnt % 14) == 0)
546 			SCTP_PRINTF("\n");
547 	}
548 	for (i = 0; i < sctp_audit_indx; i++) {
549 		if ((sctp_audit_data[i][0] == 0xe0) &&
550 		    (sctp_audit_data[i][1] == 0x01)) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if (sctp_audit_data[i][0] == 0xf0) {
554 			cnt = 0;
555 			SCTP_PRINTF("\n");
556 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
557 		    (sctp_audit_data[i][1] == 0x01)) {
558 			SCTP_PRINTF("\n");
559 			cnt = 0;
560 		}
561 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
562 		    (uint32_t) sctp_audit_data[i][1]);
563 		cnt++;
564 		if ((cnt % 14) == 0)
565 			SCTP_PRINTF("\n");
566 	}
567 	SCTP_PRINTF("\n");
568 }
569 
/*
 * Audit the association's retransmission and flight-size accounting.
 * Trace bytes are appended to the circular sctp_audit_data[] buffer;
 * whenever a counter disagrees with what the queues actually contain,
 * the counter is corrected in place and a report is printed.
 * NOTE(review): the 'net' parameter is not used in this body — confirm
 * whether it is kept only for call-site symmetry.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA marks an audit entry; the low byte records the call site. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	/* 0xAF/0x01: bailed out because no endpoint was supplied. */
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xAF/0x02: bailed out because no association was supplied. */
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the retransmit count we are about to verify. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Walk the sent queue once, tallying chunks marked for resend and
	 * the booked bytes/count of chunks still considered in flight.
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	/* Mismatch 0xAF/0xA1: retransmit counter disagrees; correct it. */
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		/* 0xA2: log the corrected value. */
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	/* Mismatch 0xAF/0xA2: total flight bytes disagree; correct. */
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	/* Mismatch 0xAF/0xA5: flight chunk count disagrees; correct. */
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/*
	 * Cross-check: the per-destination flight sizes must sum to the
	 * (now possibly corrected) association total.
	 */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	/* Mismatch 0xAF/0xA3: rebuild each net's flight from the queue. */
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	/* Any correction above triggers a full dump of the audit buffer. */
	if (rep) {
		sctp_print_audit_report();
	}
}
699 
700 void
701 sctp_audit_log(uint8_t ev, uint8_t fd)
702 {
703 
704 	sctp_audit_data[sctp_audit_indx][0] = ev;
705 	sctp_audit_data[sctp_audit_indx][1] = fd;
706 	sctp_audit_indx++;
707 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
708 		sctp_audit_indx = 0;
709 	}
710 }
711 
712 #endif
713 
714 /*
715  * sctp_stop_timers_for_shutdown() should be called
716  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
717  * state to make sure that all timers are stopped.
718  */
719 void
720 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
721 {
722 	struct sctp_association *asoc;
723 	struct sctp_nets *net;
724 
725 	asoc = &stcb->asoc;
726 
727 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
728 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
729 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
730 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
731 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
732 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
733 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
734 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
735 	}
736 }
737 
738 /*
739  * a list of sizes based on typical mtu's, used only if next hop size not
740  * returned.
741  */
742 static uint32_t sctp_mtu_sizes[] = {
743 	68,
744 	296,
745 	508,
746 	512,
747 	544,
748 	576,
749 	1006,
750 	1492,
751 	1500,
752 	1536,
753 	2002,
754 	2048,
755 	4352,
756 	4464,
757 	8166,
758 	17914,
759 	32000,
760 	65535
761 };
762 
763 /*
764  * Return the largest MTU smaller than val. If there is no
765  * entry, just return val.
766  */
767 uint32_t
768 sctp_get_prev_mtu(uint32_t val)
769 {
770 	uint32_t i;
771 
772 	if (val <= sctp_mtu_sizes[0]) {
773 		return (val);
774 	}
775 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
776 		if (val <= sctp_mtu_sizes[i]) {
777 			break;
778 		}
779 	}
780 	return (sctp_mtu_sizes[i - 1]);
781 }
782 
783 /*
784  * Return the smallest MTU larger than val. If there is no
785  * entry, just return val.
786  */
787 uint32_t
788 sctp_get_next_mtu(uint32_t val)
789 {
790 	/* select another MTU that is just bigger than this one */
791 	uint32_t i;
792 
793 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
794 		if (val < sctp_mtu_sizes[i]) {
795 			return (sctp_mtu_sizes[i]);
796 		}
797 	}
798 	return (val);
799 }
800 
/*
 * Refill the endpoint's pool of pre-computed random bytes by hashing
 * (SCTP_HMAC) the stored random seed keyed with a monotonically
 * increasing counter.  Called when sctp_select_initial_TSN() wraps the
 * store; resets store_at to 0 and bumps the counter for next time.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but that's ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
819 
820 uint32_t
821 sctp_select_initial_TSN(struct sctp_pcb *inp)
822 {
823 	/*
824 	 * A true implementation should use random selection process to get
825 	 * the initial stream sequence number, using RFC1750 as a good
826 	 * guideline
827 	 */
828 	uint32_t x, *xp;
829 	uint8_t *p;
830 	int store_at, new_store;
831 
832 	if (inp->initial_sequence_debug != 0) {
833 		uint32_t ret;
834 
835 		ret = inp->initial_sequence_debug;
836 		inp->initial_sequence_debug++;
837 		return (ret);
838 	}
839 retry:
840 	store_at = inp->store_at;
841 	new_store = store_at + sizeof(uint32_t);
842 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
843 		new_store = 0;
844 	}
845 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
846 		goto retry;
847 	}
848 	if (new_store == 0) {
849 		/* Refill the random store */
850 		sctp_fill_random_store(inp);
851 	}
852 	p = &inp->random_store[store_at];
853 	xp = (uint32_t *) p;
854 	x = *xp;
855 	return (x);
856 }
857 
858 uint32_t
859 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
860 {
861 	uint32_t x;
862 	struct timeval now;
863 
864 	if (check) {
865 		(void)SCTP_GETTIME_TIMEVAL(&now);
866 	}
867 	for (;;) {
868 		x = sctp_select_initial_TSN(&inp->sctp_ep);
869 		if (x == 0) {
870 			/* we never use 0 */
871 			continue;
872 		}
873 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
874 			break;
875 		}
876 	}
877 	return (x);
878 }
879 
880 int
881 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
882     uint32_t override_tag, uint32_t vrf_id)
883 {
884 	struct sctp_association *asoc;
885 
886 	/*
887 	 * Anything set to zero is taken care of by the allocation routine's
888 	 * bzero
889 	 */
890 
891 	/*
892 	 * Up front select what scoping to apply on addresses I tell my peer
893 	 * Not sure what to do with these right now, we will need to come up
894 	 * with a way to set them. We may need to pass them through from the
895 	 * caller in the sctp_aloc_assoc() function.
896 	 */
897 	int i;
898 
899 	asoc = &stcb->asoc;
900 	/* init all variables to a known value. */
901 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
902 	asoc->max_burst = inp->sctp_ep.max_burst;
903 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
904 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
905 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
906 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
907 	asoc->ecn_allowed = inp->sctp_ecn_enable;
908 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
909 	asoc->sctp_cmt_pf = (uint8_t) 0;
910 	asoc->sctp_frag_point = inp->sctp_frag_point;
911 	asoc->sctp_features = inp->sctp_features;
912 	asoc->default_dscp = inp->sctp_ep.default_dscp;
913 #ifdef INET6
914 	if (inp->sctp_ep.default_flowlabel) {
915 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
916 	} else {
917 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
918 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
919 			asoc->default_flowlabel &= 0x000fffff;
920 			asoc->default_flowlabel |= 0x80000000;
921 		} else {
922 			asoc->default_flowlabel = 0;
923 		}
924 	}
925 #endif
926 	asoc->sb_send_resv = 0;
927 	if (override_tag) {
928 		asoc->my_vtag = override_tag;
929 	} else {
930 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
931 	}
932 	/* Get the nonce tags */
933 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
934 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
935 	asoc->vrf_id = vrf_id;
936 
937 #ifdef SCTP_ASOCLOG_OF_TSNS
938 	asoc->tsn_in_at = 0;
939 	asoc->tsn_out_at = 0;
940 	asoc->tsn_in_wrapped = 0;
941 	asoc->tsn_out_wrapped = 0;
942 	asoc->cumack_log_at = 0;
943 	asoc->cumack_log_atsnt = 0;
944 #endif
945 #ifdef SCTP_FS_SPEC_LOG
946 	asoc->fs_index = 0;
947 #endif
948 	asoc->refcnt = 0;
949 	asoc->assoc_up_sent = 0;
950 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
951 	    sctp_select_initial_TSN(&inp->sctp_ep);
952 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
953 	/* we are optimisitic here */
954 	asoc->peer_supports_pktdrop = 1;
955 	asoc->peer_supports_nat = 0;
956 	asoc->sent_queue_retran_cnt = 0;
957 
958 	/* for CMT */
959 	asoc->last_net_cmt_send_started = NULL;
960 
961 	/* This will need to be adjusted */
962 	asoc->last_acked_seq = asoc->init_seq_number - 1;
963 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
964 	asoc->asconf_seq_in = asoc->last_acked_seq;
965 
966 	/* here we are different, we hold the next one we expect */
967 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
968 
969 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
970 	asoc->initial_rto = inp->sctp_ep.initial_rto;
971 
972 	asoc->max_init_times = inp->sctp_ep.max_init_times;
973 	asoc->max_send_times = inp->sctp_ep.max_send_times;
974 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
975 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
976 	asoc->free_chunk_cnt = 0;
977 
978 	asoc->iam_blocking = 0;
979 	asoc->context = inp->sctp_context;
980 	asoc->local_strreset_support = inp->local_strreset_support;
981 	asoc->def_send = inp->def_send;
982 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
983 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
984 	asoc->pr_sctp_cnt = 0;
985 	asoc->total_output_queue_size = 0;
986 
987 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
988 		asoc->scope.ipv6_addr_legal = 1;
989 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
990 			asoc->scope.ipv4_addr_legal = 1;
991 		} else {
992 			asoc->scope.ipv4_addr_legal = 0;
993 		}
994 	} else {
995 		asoc->scope.ipv6_addr_legal = 0;
996 		asoc->scope.ipv4_addr_legal = 1;
997 	}
998 
999 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1000 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1001 
1002 	asoc->smallest_mtu = inp->sctp_frag_point;
1003 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1004 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1005 
1006 	asoc->locked_on_sending = NULL;
1007 	asoc->stream_locked_on = 0;
1008 	asoc->ecn_echo_cnt_onq = 0;
1009 	asoc->stream_locked = 0;
1010 
1011 	asoc->send_sack = 1;
1012 
1013 	LIST_INIT(&asoc->sctp_restricted_addrs);
1014 
1015 	TAILQ_INIT(&asoc->nets);
1016 	TAILQ_INIT(&asoc->pending_reply_queue);
1017 	TAILQ_INIT(&asoc->asconf_ack_sent);
1018 	/* Setup to fill the hb random cache at first HB */
1019 	asoc->hb_random_idx = 4;
1020 
1021 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1022 
1023 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1024 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1025 
1026 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1027 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1028 
1029 	/*
1030 	 * Now the stream parameters, here we allocate space for all streams
1031 	 * that we request by default.
1032 	 */
1033 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1034 	    inp->sctp_ep.pre_open_stream_count;
1035 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1036 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1037 	    SCTP_M_STRMO);
1038 	if (asoc->strmout == NULL) {
1039 		/* big trouble no memory */
1040 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1041 		return (ENOMEM);
1042 	}
1043 	for (i = 0; i < asoc->streamoutcnt; i++) {
1044 		/*
1045 		 * inbound side must be set to 0xffff, also NOTE when we get
1046 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1047 		 * count (streamoutcnt) but first check if we sent to any of
1048 		 * the upper streams that were dropped (if some were). Those
1049 		 * that were dropped must be notified to the upper layer as
1050 		 * failed to send.
1051 		 */
1052 		asoc->strmout[i].next_sequence_send = 0x0;
1053 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1054 		asoc->strmout[i].chunks_on_queues = 0;
1055 		asoc->strmout[i].stream_no = i;
1056 		asoc->strmout[i].last_msg_incomplete = 0;
1057 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1058 	}
1059 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1060 
1061 	/* Now the mapping array */
1062 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1063 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1064 	    SCTP_M_MAP);
1065 	if (asoc->mapping_array == NULL) {
1066 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1067 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1068 		return (ENOMEM);
1069 	}
1070 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1071 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1072 	    SCTP_M_MAP);
1073 	if (asoc->nr_mapping_array == NULL) {
1074 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1075 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1076 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1077 		return (ENOMEM);
1078 	}
1079 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1080 
1081 	/* Now the init of the other outqueues */
1082 	TAILQ_INIT(&asoc->free_chunks);
1083 	TAILQ_INIT(&asoc->control_send_queue);
1084 	TAILQ_INIT(&asoc->asconf_send_queue);
1085 	TAILQ_INIT(&asoc->send_queue);
1086 	TAILQ_INIT(&asoc->sent_queue);
1087 	TAILQ_INIT(&asoc->reasmqueue);
1088 	TAILQ_INIT(&asoc->resetHead);
1089 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1090 	TAILQ_INIT(&asoc->asconf_queue);
1091 	/* authentication fields */
1092 	asoc->authinfo.random = NULL;
1093 	asoc->authinfo.active_keyid = 0;
1094 	asoc->authinfo.assoc_key = NULL;
1095 	asoc->authinfo.assoc_keyid = 0;
1096 	asoc->authinfo.recv_key = NULL;
1097 	asoc->authinfo.recv_keyid = 0;
1098 	LIST_INIT(&asoc->shared_keys);
1099 	asoc->marked_retrans = 0;
1100 	asoc->port = inp->sctp_ep.port;
1101 	asoc->timoinit = 0;
1102 	asoc->timodata = 0;
1103 	asoc->timosack = 0;
1104 	asoc->timoshutdown = 0;
1105 	asoc->timoheartbeat = 0;
1106 	asoc->timocookie = 0;
1107 	asoc->timoshutdownack = 0;
1108 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1109 	asoc->discontinuity_time = asoc->start_time;
1110 	/*
1111 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1112 	 * freed later when the association is freed.
1113 	 */
1114 	return (0);
1115 }
1116 
1117 void
1118 sctp_print_mapping_array(struct sctp_association *asoc)
1119 {
1120 	unsigned int i, limit;
1121 
1122 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1123 	    asoc->mapping_array_size,
1124 	    asoc->mapping_array_base_tsn,
1125 	    asoc->cumulative_tsn,
1126 	    asoc->highest_tsn_inside_map,
1127 	    asoc->highest_tsn_inside_nr_map);
1128 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1129 		if (asoc->mapping_array[limit - 1] != 0) {
1130 			break;
1131 		}
1132 	}
1133 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1134 	for (i = 0; i < limit; i++) {
1135 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1136 	}
1137 	if (limit % 16)
1138 		SCTP_PRINTF("\n");
1139 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1140 		if (asoc->nr_mapping_array[limit - 1]) {
1141 			break;
1142 		}
1143 	}
1144 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1145 	for (i = 0; i < limit; i++) {
1146 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1147 	}
1148 	if (limit % 16)
1149 		SCTP_PRINTF("\n");
1150 }
1151 
1152 int
1153 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1154 {
1155 	/* mapping array needs to grow */
1156 	uint8_t *new_array1, *new_array2;
1157 	uint32_t new_size;
1158 
1159 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1160 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1161 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1162 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1163 		/* can't get more, forget it */
1164 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1165 		if (new_array1) {
1166 			SCTP_FREE(new_array1, SCTP_M_MAP);
1167 		}
1168 		if (new_array2) {
1169 			SCTP_FREE(new_array2, SCTP_M_MAP);
1170 		}
1171 		return (-1);
1172 	}
1173 	memset(new_array1, 0, new_size);
1174 	memset(new_array2, 0, new_size);
1175 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1176 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1177 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1178 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1179 	asoc->mapping_array = new_array1;
1180 	asoc->nr_mapping_array = new_array2;
1181 	asoc->mapping_array_size = new_size;
1182 	return (0);
1183 }
1184 
1185 
/*
 * Core of the asynchronous PCB/association iterator.  Walks every endpoint
 * (inp) that matches the iterator's pcb_flags/pcb_features filters and,
 * within each, every association (stcb) matching asoc_state, invoking the
 * caller-supplied callbacks (function_inp, function_assoc, function_inp_end,
 * function_atend).  Consumes and frees 'it' when the walk completes.
 *
 * Locking: runs with INP_INFO read lock and the global ITERATOR lock held;
 * periodically drops and reacquires both (see SCTP_ITERATOR_MAX_AT_ONCE) so
 * other threads can make progress, pinning the current inp/stcb with
 * refcounts across the gap.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* Drop the reference taken when the iterator was queued. */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* First endpoint arrives already read-locked (above). */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	/* Skip endpoints whose flags/features don't match the filters. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		/* Per-endpoint callback runs once per endpoint. */
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Endpoint skipped or has no associations; finish it up. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Pin the stcb and inp with refcounts, then drop and
			 * reacquire all locks to let other threads progress.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				/* A stop request arrived while unlocked. */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Relock and release the pinning references. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1333 
/*
 * Drain the global iterator work queue, running each queued iterator via
 * sctp_iterator_work() (which consumes and frees it).  Called with the
 * iterator WQ lock held; the lock is dropped around each iterator run and
 * retaken afterwards, and is held again on return.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		sctp_it_ctl.cur_it = it;
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		/* Run under the iterator's own vnet; 'it' is freed inside. */
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		sctp_it_ctl.cur_it = NULL;
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
1357 
1358 
1359 static void
1360 sctp_handle_addr_wq(void)
1361 {
1362 	/* deal with the ADDR wq from the rtsock calls */
1363 	struct sctp_laddr *wi, *nwi;
1364 	struct sctp_asconf_iterator *asc;
1365 
1366 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1367 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1368 	if (asc == NULL) {
1369 		/* Try later, no memory */
1370 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1371 		    (struct sctp_inpcb *)NULL,
1372 		    (struct sctp_tcb *)NULL,
1373 		    (struct sctp_nets *)NULL);
1374 		return;
1375 	}
1376 	LIST_INIT(&asc->list_of_work);
1377 	asc->cnt = 0;
1378 
1379 	SCTP_WQ_ADDR_LOCK();
1380 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1381 		LIST_REMOVE(wi, sctp_nxt_addr);
1382 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1383 		asc->cnt++;
1384 	}
1385 	SCTP_WQ_ADDR_UNLOCK();
1386 
1387 	if (asc->cnt == 0) {
1388 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1389 	} else {
1390 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1391 		    sctp_asconf_iterator_stcb,
1392 		    NULL,	/* No ep end for boundall */
1393 		    SCTP_PCB_FLAGS_BOUNDALL,
1394 		    SCTP_PCB_ANY_FEATURES,
1395 		    SCTP_ASOC_ANY_STATE,
1396 		    (void *)asc, 0,
1397 		    sctp_asconf_iterator_end, NULL, 0);
1398 	}
1399 }
1400 
/*
 * Common callout handler for every SCTP timer type.  't' is the
 * struct sctp_timer embedded in the endpoint/association/net that armed it.
 *
 * The function first runs a cascade of sanity checks (stale timer, invalid
 * type, vanished endpoint/association, rescheduled or deactivated callout),
 * recording its progress in tmr->stopped_from for post-mortem debugging,
 * then dispatches on tmr->type to the appropriate per-timer routine.  The
 * inp reference and (when present) the TCB lock taken here are released via
 * the get_out/out_decr/out_no_decr tails; paths that free the stcb or inp
 * jump directly to out_no_decr.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Every timer except ADDR_WQ requires an endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	/* Save the type now: tmr may be freed by the handlers below. */
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * A socket-less endpoint only services the timer types that
		 * must still run while the endpoint is being torn down.
		 */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the association while we validate its state. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped between firing and us getting here. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* Hold the TCB lock for the handler; drop the pin. */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/* Only timer type here that also requires a net. */
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/* COOKIE-ECHO retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret keys. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Shutdown took too long; abort the association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Final teardown of a dying association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		/* Final teardown of a dying endpoint. */
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1840 
1841 void
1842 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1843     struct sctp_nets *net)
1844 {
1845 	uint32_t to_ticks;
1846 	struct sctp_timer *tmr;
1847 
1848 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1849 		return;
1850 
1851 	tmr = NULL;
1852 	if (stcb) {
1853 		SCTP_TCB_LOCK_ASSERT(stcb);
1854 	}
1855 	switch (t_type) {
1856 	case SCTP_TIMER_TYPE_ZERO_COPY:
1857 		tmr = &inp->sctp_ep.zero_copy_timer;
1858 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1859 		break;
1860 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1861 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1862 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1863 		break;
1864 	case SCTP_TIMER_TYPE_ADDR_WQ:
1865 		/* Only 1 tick away :-) */
1866 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1867 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1868 		break;
1869 	case SCTP_TIMER_TYPE_SEND:
1870 		/* Here we use the RTO timer */
1871 		{
1872 			int rto_val;
1873 
1874 			if ((stcb == NULL) || (net == NULL)) {
1875 				return;
1876 			}
1877 			tmr = &net->rxt_timer;
1878 			if (net->RTO == 0) {
1879 				rto_val = stcb->asoc.initial_rto;
1880 			} else {
1881 				rto_val = net->RTO;
1882 			}
1883 			to_ticks = MSEC_TO_TICKS(rto_val);
1884 		}
1885 		break;
1886 	case SCTP_TIMER_TYPE_INIT:
1887 		/*
1888 		 * Here we use the INIT timer default usually about 1
1889 		 * minute.
1890 		 */
1891 		if ((stcb == NULL) || (net == NULL)) {
1892 			return;
1893 		}
1894 		tmr = &net->rxt_timer;
1895 		if (net->RTO == 0) {
1896 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1897 		} else {
1898 			to_ticks = MSEC_TO_TICKS(net->RTO);
1899 		}
1900 		break;
1901 	case SCTP_TIMER_TYPE_RECV:
1902 		/*
1903 		 * Here we use the Delayed-Ack timer value from the inp
1904 		 * ususually about 200ms.
1905 		 */
1906 		if (stcb == NULL) {
1907 			return;
1908 		}
1909 		tmr = &stcb->asoc.dack_timer;
1910 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1911 		break;
1912 	case SCTP_TIMER_TYPE_SHUTDOWN:
1913 		/* Here we use the RTO of the destination. */
1914 		if ((stcb == NULL) || (net == NULL)) {
1915 			return;
1916 		}
1917 		if (net->RTO == 0) {
1918 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1919 		} else {
1920 			to_ticks = MSEC_TO_TICKS(net->RTO);
1921 		}
1922 		tmr = &net->rxt_timer;
1923 		break;
1924 	case SCTP_TIMER_TYPE_HEARTBEAT:
1925 		/*
1926 		 * the net is used here so that we can add in the RTO. Even
1927 		 * though we use a different timer. We also add the HB timer
1928 		 * PLUS a random jitter.
1929 		 */
1930 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
1931 			return;
1932 		} else {
1933 			uint32_t rndval;
1934 			uint32_t jitter;
1935 
1936 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1937 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1938 				return;
1939 			}
1940 			if (net->RTO == 0) {
1941 				to_ticks = stcb->asoc.initial_rto;
1942 			} else {
1943 				to_ticks = net->RTO;
1944 			}
1945 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1946 			jitter = rndval % to_ticks;
1947 			if (jitter >= (to_ticks >> 1)) {
1948 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1949 			} else {
1950 				to_ticks = to_ticks - jitter;
1951 			}
1952 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1953 			    !(net->dest_state & SCTP_ADDR_PF)) {
1954 				to_ticks += net->heart_beat_delay;
1955 			}
1956 			/*
1957 			 * Now we must convert the to_ticks that are now in
1958 			 * ms to ticks.
1959 			 */
1960 			to_ticks = MSEC_TO_TICKS(to_ticks);
1961 			tmr = &net->hb_timer;
1962 		}
1963 		break;
1964 	case SCTP_TIMER_TYPE_COOKIE:
1965 		/*
1966 		 * Here we can use the RTO timer from the network since one
1967 		 * RTT was compelete. If a retran happened then we will be
1968 		 * using the RTO initial value.
1969 		 */
1970 		if ((stcb == NULL) || (net == NULL)) {
1971 			return;
1972 		}
1973 		if (net->RTO == 0) {
1974 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1975 		} else {
1976 			to_ticks = MSEC_TO_TICKS(net->RTO);
1977 		}
1978 		tmr = &net->rxt_timer;
1979 		break;
1980 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1981 		/*
1982 		 * nothing needed but the endpoint here ususually about 60
1983 		 * minutes.
1984 		 */
1985 		if (inp == NULL) {
1986 			return;
1987 		}
1988 		tmr = &inp->sctp_ep.signature_change;
1989 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
1990 		break;
1991 	case SCTP_TIMER_TYPE_ASOCKILL:
1992 		if (stcb == NULL) {
1993 			return;
1994 		}
1995 		tmr = &stcb->asoc.strreset_timer;
1996 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
1997 		break;
1998 	case SCTP_TIMER_TYPE_INPKILL:
1999 		/*
2000 		 * The inp is setup to die. We re-use the signature_chage
2001 		 * timer since that has stopped and we are in the GONE
2002 		 * state.
2003 		 */
2004 		if (inp == NULL) {
2005 			return;
2006 		}
2007 		tmr = &inp->sctp_ep.signature_change;
2008 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2009 		break;
2010 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2011 		/*
2012 		 * Here we use the value found in the EP for PMTU ususually
2013 		 * about 10 minutes.
2014 		 */
2015 		if ((stcb == NULL) || (inp == NULL)) {
2016 			return;
2017 		}
2018 		if (net == NULL) {
2019 			return;
2020 		}
2021 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2022 			return;
2023 		}
2024 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2025 		tmr = &net->pmtu_timer;
2026 		break;
2027 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2028 		/* Here we use the RTO of the destination */
2029 		if ((stcb == NULL) || (net == NULL)) {
2030 			return;
2031 		}
2032 		if (net->RTO == 0) {
2033 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2034 		} else {
2035 			to_ticks = MSEC_TO_TICKS(net->RTO);
2036 		}
2037 		tmr = &net->rxt_timer;
2038 		break;
2039 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2040 		/*
2041 		 * Here we use the endpoints shutdown guard timer usually
2042 		 * about 3 minutes.
2043 		 */
2044 		if ((inp == NULL) || (stcb == NULL)) {
2045 			return;
2046 		}
2047 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2048 		tmr = &stcb->asoc.shut_guard_timer;
2049 		break;
2050 	case SCTP_TIMER_TYPE_STRRESET:
2051 		/*
2052 		 * Here the timer comes from the stcb but its value is from
2053 		 * the net's RTO.
2054 		 */
2055 		if ((stcb == NULL) || (net == NULL)) {
2056 			return;
2057 		}
2058 		if (net->RTO == 0) {
2059 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2060 		} else {
2061 			to_ticks = MSEC_TO_TICKS(net->RTO);
2062 		}
2063 		tmr = &stcb->asoc.strreset_timer;
2064 		break;
2065 	case SCTP_TIMER_TYPE_ASCONF:
2066 		/*
2067 		 * Here the timer comes from the stcb but its value is from
2068 		 * the net's RTO.
2069 		 */
2070 		if ((stcb == NULL) || (net == NULL)) {
2071 			return;
2072 		}
2073 		if (net->RTO == 0) {
2074 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2075 		} else {
2076 			to_ticks = MSEC_TO_TICKS(net->RTO);
2077 		}
2078 		tmr = &stcb->asoc.asconf_timer;
2079 		break;
2080 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2081 		if ((stcb == NULL) || (net != NULL)) {
2082 			return;
2083 		}
2084 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2085 		tmr = &stcb->asoc.delete_prim_timer;
2086 		break;
2087 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2088 		if (stcb == NULL) {
2089 			return;
2090 		}
2091 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2092 			/*
2093 			 * Really an error since stcb is NOT set to
2094 			 * autoclose
2095 			 */
2096 			return;
2097 		}
2098 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2099 		tmr = &stcb->asoc.autoclose_timer;
2100 		break;
2101 	default:
2102 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2103 		    __FUNCTION__, t_type);
2104 		return;
2105 		break;
2106 	}
2107 	if ((to_ticks <= 0) || (tmr == NULL)) {
2108 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2109 		    __FUNCTION__, t_type, to_ticks, (void *)tmr);
2110 		return;
2111 	}
2112 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2113 		/*
2114 		 * we do NOT allow you to have it already running. if it is
2115 		 * we leave the current one up unchanged
2116 		 */
2117 		return;
2118 	}
2119 	/* At this point we can proceed */
2120 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2121 		stcb->asoc.num_send_timers_up++;
2122 	}
2123 	tmr->stopped_from = 0;
2124 	tmr->type = t_type;
2125 	tmr->ep = (void *)inp;
2126 	tmr->tcb = (void *)stcb;
2127 	tmr->net = (void *)net;
2128 	tmr->self = (void *)tmr;
2129 	tmr->vnet = (void *)curvnet;
2130 	tmr->ticks = sctp_get_tick_count();
2131 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2132 	return;
2133 }
2134 
/*
 * Stop (cancel) the timer of type t_type backed by the given
 * inp/stcb/net triple.  'from' identifies the caller's location and is
 * recorded in tmr->stopped_from for debugging.  Silently returns if the
 * required objects for that timer type are missing, or if the shared
 * timer structure is currently armed for a different timer type.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Every timer type except ADDR_WQ hangs off an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Map the timer type to the sctp_timer structure that backs it. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the running send-timer count in sync with the stop. */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2291 
2292 uint32_t
2293 sctp_calculate_len(struct mbuf *m)
2294 {
2295 	uint32_t tlen = 0;
2296 	struct mbuf *at;
2297 
2298 	at = m;
2299 	while (at) {
2300 		tlen += SCTP_BUF_LEN(at);
2301 		at = SCTP_BUF_NEXT(at);
2302 	}
2303 	return (tlen);
2304 }
2305 
2306 void
2307 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2308     struct sctp_association *asoc, uint32_t mtu)
2309 {
2310 	/*
2311 	 * Reset the P-MTU size on this association, this involves changing
2312 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2313 	 * allow the DF flag to be cleared.
2314 	 */
2315 	struct sctp_tmit_chunk *chk;
2316 	unsigned int eff_mtu, ovh;
2317 
2318 	asoc->smallest_mtu = mtu;
2319 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2320 		ovh = SCTP_MIN_OVERHEAD;
2321 	} else {
2322 		ovh = SCTP_MIN_V4_OVERHEAD;
2323 	}
2324 	eff_mtu = mtu - ovh;
2325 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2326 		if (chk->send_size > eff_mtu) {
2327 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2328 		}
2329 	}
2330 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2331 		if (chk->send_size > eff_mtu) {
2332 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2333 		}
2334 	}
2335 }
2336 
2337 
2338 /*
2339  * given an association and starting time of the current RTT period return
2340  * RTO in number of msecs net should point to the current network
2341  */
2342 
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe, int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now, then, *old;

	/*
	 * Copy it out for sparc64 (the caller's timeval may be
	 * misaligned; the copy guarantees an aligned access).
	 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error: unknown 'safe' mode */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* now = now - old, i.e. the elapsed RTT */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
	        (uint64_t) now.tv_usec;

	/* compute rtt in ms */
	rtt = net->rtt / 1000;
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* rtt now holds the error term (sample - srtt) */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurement: seed srtt/rttvar from the sample */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	/* never let the variance collapse to zero */
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2458 
2459 /*
2460  * return a pointer to a contiguous piece of data from the given mbuf chain
2461  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2462  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2463  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2464  */
2465 caddr_t
2466 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2467 {
2468 	uint32_t count;
2469 	uint8_t *ptr;
2470 
2471 	ptr = in_ptr;
2472 	if ((off < 0) || (len <= 0))
2473 		return (NULL);
2474 
2475 	/* find the desired start location */
2476 	while ((m != NULL) && (off > 0)) {
2477 		if (off < SCTP_BUF_LEN(m))
2478 			break;
2479 		off -= SCTP_BUF_LEN(m);
2480 		m = SCTP_BUF_NEXT(m);
2481 	}
2482 	if (m == NULL)
2483 		return (NULL);
2484 
2485 	/* is the current mbuf large enough (eg. contiguous)? */
2486 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2487 		return (mtod(m, caddr_t)+off);
2488 	} else {
2489 		/* else, it spans more than one mbuf, so save a temp copy... */
2490 		while ((m != NULL) && (len > 0)) {
2491 			count = min(SCTP_BUF_LEN(m) - off, len);
2492 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2493 			len -= count;
2494 			ptr += count;
2495 			off = 0;
2496 			m = SCTP_BUF_NEXT(m);
2497 		}
2498 		if ((m == NULL) && (len > 0))
2499 			return (NULL);
2500 		else
2501 			return ((caddr_t)in_ptr);
2502 	}
2503 }
2504 
2505 
2506 
2507 struct sctp_paramhdr *
2508 sctp_get_next_param(struct mbuf *m,
2509     int offset,
2510     struct sctp_paramhdr *pull,
2511     int pull_limit)
2512 {
2513 	/* This just provides a typed signature to Peter's Pull routine */
2514 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2515 	    (uint8_t *) pull));
2516 }
2517 
2518 
2519 int
2520 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2521 {
2522 	/*
2523 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2524 	 * padlen is > 3 this routine will fail.
2525 	 */
2526 	uint8_t *dp;
2527 	int i;
2528 
2529 	if (padlen > 3) {
2530 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2531 		return (ENOBUFS);
2532 	}
2533 	if (padlen <= M_TRAILINGSPACE(m)) {
2534 		/*
2535 		 * The easy way. We hope the majority of the time we hit
2536 		 * here :)
2537 		 */
2538 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2539 		SCTP_BUF_LEN(m) += padlen;
2540 	} else {
2541 		/* Hard way we must grow the mbuf */
2542 		struct mbuf *tmp;
2543 
2544 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2545 		if (tmp == NULL) {
2546 			/* Out of space GAK! we are in big trouble. */
2547 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2548 			return (ENOBUFS);
2549 		}
2550 		/* setup and insert in middle */
2551 		SCTP_BUF_LEN(tmp) = padlen;
2552 		SCTP_BUF_NEXT(tmp) = NULL;
2553 		SCTP_BUF_NEXT(m) = tmp;
2554 		dp = mtod(tmp, uint8_t *);
2555 	}
2556 	/* zero out the pad */
2557 	for (i = 0; i < padlen; i++) {
2558 		*dp = 0;
2559 		dp++;
2560 	}
2561 	return (0);
2562 }
2563 
2564 int
2565 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2566 {
2567 	/* find the last mbuf in chain and pad it */
2568 	struct mbuf *m_at;
2569 
2570 	if (last_mbuf) {
2571 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2572 	} else {
2573 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2574 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2575 				return (sctp_add_pad_tombuf(m_at, padval));
2576 			}
2577 		}
2578 	}
2579 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2580 	return (EFAULT);
2581 }
2582 
/*
 * Queue an SCTP_ASSOC_CHANGE notification (state/error, optionally the
 * peer's ABORT chunk payload) to the socket's receive buffer, set the
 * socket error for 1-to-1 style sockets on COMM_LOST/CANT_STR_ASSOC,
 * and wake any sleepers.  'from_peer' selects between peer-initiated
 * (ECONNREFUSED/ECONNRESET) and local (ETIMEDOUT/ECONNABORTED) errno
 * values.  'so_locked' tells the readq code whether the socket lock is
 * already held (only meaningful under SCTP_SO_LOCK_TESTING/__APPLE__).
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	size_t notif_len, abort_len;
	unsigned int i;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
		} else {
			abort_len = 0;
		}
		/* Trailing info: feature flags on UP/RESTART, ABORT copy on loss. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value (drop the trailing info). */
			notif_len = sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				/* Append one byte per supported peer feature. */
				i = 0;
				if (stcb->asoc.peer_supports_prsctp) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.peer_supports_auth) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.peer_supports_asconf) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.peer_supports_strreset) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				/* Append the raw ABORT chunk for the user. */
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			/* not that we need this */
			control->tail_mbuf = m_notify;
			control->spec_flags = M_NOTIFICATION;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			/* readq entry allocation failed; drop the notification */
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up and error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Take the socket lock if the caller does not hold it; the
	 * refcount keeps the stcb alive while the TCB lock is dropped.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore_locked(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2729 
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification for address 'sa' with the
 * given state (e.g. reachable/unreachable) and error to the socket's
 * receive buffer.  Silently does nothing if the user has not enabled
 * SCTP_PCB_FLAGS_RECVPADDREVNT or if memory cannot be allocated.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			/*
			 * Link-local addresses: present the scope the way
			 * userland expects it (scope_id set, none embedded
			 * in the address bytes).
			 */
			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    SCTP_SO_NOT_LOCKED);
}
2807 
2808 
/*
 * Queue a send-failed notification for the chunk 'chk' to the socket's
 * receive buffer, using either the newer SCTP_SEND_FAILED_EVENT layout
 * or the older SCTP_SEND_FAILED layout depending on which event the
 * user enabled.  The chunk's data mbuf (minus its SCTP data-chunk
 * header) is appended to the notification and ownership is taken from
 * 'chk'.  'sent' selects SCTP_DATA_SENT vs SCTP_DATA_UNSENT.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* reported length covers the user payload, not the chunk header */
	length += chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* newer sctp_send_failed_event layout */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		/* legacy sctp_send_failed layout */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2919 
2920 
/*
 * Build and queue a send-failure notification for user data that was
 * still sitting on the stream output queue (never broken into chunks
 * for transmission).  Depending on which event the application
 * subscribed to, either the old-style SCTP_SEND_FAILED or the newer
 * SCTP_SEND_FAILED_EVENT layout is generated.  The pending data
 * (sp->data) is stolen from the queue entry and chained behind the
 * notification header so the application can see what was not sent.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	/*
	 * The advertised length covers the header plus the unsent user
	 * data; only the header lives in m_notify, the data itself is
	 * chained on via SCTP_BUF_NEXT below.
	 */
	length += sp->length;
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
		ssfe->ssfe_info.snd_sid = sp->stream;
		if (sp->some_taken) {
			/* part of the message was already taken for sending */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			/* part of the message was already taken for sending */
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* Dropping here also frees the stolen user data chained above. */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3020 
3021 
3022 
3023 static void
3024 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3025 {
3026 	struct mbuf *m_notify;
3027 	struct sctp_adaptation_event *sai;
3028 	struct sctp_queued_to_read *control;
3029 
3030 	if ((stcb == NULL) ||
3031 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3032 		/* event not enabled */
3033 		return;
3034 	}
3035 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3036 	if (m_notify == NULL)
3037 		/* no space left */
3038 		return;
3039 	SCTP_BUF_LEN(m_notify) = 0;
3040 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3041 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3042 	sai->sai_flags = 0;
3043 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3044 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3045 	sai->sai_assoc_id = sctp_get_associd(stcb);
3046 
3047 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3048 	SCTP_BUF_NEXT(m_notify) = NULL;
3049 
3050 	/* append to socket */
3051 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3052 	    0, 0, stcb->asoc.context, 0, 0, 0,
3053 	    m_notify);
3054 	if (control == NULL) {
3055 		/* no memory */
3056 		sctp_m_freem(m_notify);
3057 		return;
3058 	}
3059 	control->length = SCTP_BUF_LEN(m_notify);
3060 	control->spec_flags = M_NOTIFICATION;
3061 	/* not that we need this */
3062 	control->tail_mbuf = m_notify;
3063 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3064 	    control,
3065 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3066 }
3067 
3068 /* This always must be called with the read-queue LOCKED in the INP */
3069 static void
3070 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3071     uint32_t val, int so_locked
3072 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3073     SCTP_UNUSED
3074 #endif
3075 )
3076 {
3077 	struct mbuf *m_notify;
3078 	struct sctp_pdapi_event *pdapi;
3079 	struct sctp_queued_to_read *control;
3080 	struct sockbuf *sb;
3081 
3082 	if ((stcb == NULL) ||
3083 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3084 		/* event not enabled */
3085 		return;
3086 	}
3087 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3088 		return;
3089 	}
3090 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3091 	if (m_notify == NULL)
3092 		/* no space left */
3093 		return;
3094 	SCTP_BUF_LEN(m_notify) = 0;
3095 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3096 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3097 	pdapi->pdapi_flags = 0;
3098 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3099 	pdapi->pdapi_indication = error;
3100 	pdapi->pdapi_stream = (val >> 16);
3101 	pdapi->pdapi_seq = (val & 0x0000ffff);
3102 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3103 
3104 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3105 	SCTP_BUF_NEXT(m_notify) = NULL;
3106 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3107 	    0, 0, stcb->asoc.context, 0, 0, 0,
3108 	    m_notify);
3109 	if (control == NULL) {
3110 		/* no memory */
3111 		sctp_m_freem(m_notify);
3112 		return;
3113 	}
3114 	control->spec_flags = M_NOTIFICATION;
3115 	control->length = SCTP_BUF_LEN(m_notify);
3116 	/* not that we need this */
3117 	control->tail_mbuf = m_notify;
3118 	control->held_length = 0;
3119 	control->length = 0;
3120 	sb = &stcb->sctp_socket->so_rcv;
3121 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3122 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3123 	}
3124 	sctp_sballoc(stcb, sb, m_notify);
3125 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3126 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3127 	}
3128 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3129 	control->end_added = 1;
3130 	if (stcb->asoc.control_pdapi)
3131 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3132 	else {
3133 		/* we really should not see this case */
3134 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3135 	}
3136 	if (stcb->sctp_ep && stcb->sctp_socket) {
3137 		/* This should always be the case */
3138 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3139 		struct socket *so;
3140 
3141 		so = SCTP_INP_SO(stcb->sctp_ep);
3142 		if (!so_locked) {
3143 			atomic_add_int(&stcb->asoc.refcnt, 1);
3144 			SCTP_TCB_UNLOCK(stcb);
3145 			SCTP_SOCKET_LOCK(so, 1);
3146 			SCTP_TCB_LOCK(stcb);
3147 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3148 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3149 				SCTP_SOCKET_UNLOCK(so, 1);
3150 				return;
3151 			}
3152 		}
3153 #endif
3154 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3155 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3156 		if (!so_locked) {
3157 			SCTP_SOCKET_UNLOCK(so, 1);
3158 		}
3159 #endif
3160 	}
3161 }
3162 
/*
 * Notify the application that the peer has sent a SHUTDOWN.  For
 * one-to-one style (TCP model) and connected one-to-many sockets the
 * socket is additionally marked as unable to send, waking up any
 * writer.  An SCTP_SHUTDOWN_EVENT is then queued if subscribed.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/*
		 * Lock-order dance: hold a refcount, drop the TCB lock,
		 * take the socket lock, then re-take the TCB lock.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket was closed while we slept; bail */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3230 
3231 static void
3232 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3233     int so_locked
3234 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3235     SCTP_UNUSED
3236 #endif
3237 )
3238 {
3239 	struct mbuf *m_notify;
3240 	struct sctp_sender_dry_event *event;
3241 	struct sctp_queued_to_read *control;
3242 
3243 	if ((stcb == NULL) ||
3244 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3245 		/* event not enabled */
3246 		return;
3247 	}
3248 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3249 	if (m_notify == NULL) {
3250 		/* no space left */
3251 		return;
3252 	}
3253 	SCTP_BUF_LEN(m_notify) = 0;
3254 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3255 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3256 	event->sender_dry_flags = 0;
3257 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3258 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3259 
3260 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3261 	SCTP_BUF_NEXT(m_notify) = NULL;
3262 
3263 	/* append to socket */
3264 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3265 	    0, 0, stcb->asoc.context, 0, 0, 0,
3266 	    m_notify);
3267 	if (control == NULL) {
3268 		/* no memory */
3269 		sctp_m_freem(m_notify);
3270 		return;
3271 	}
3272 	control->length = SCTP_BUF_LEN(m_notify);
3273 	control->spec_flags = M_NOTIFICATION;
3274 	/* not that we need this */
3275 	control->tail_mbuf = m_notify;
3276 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3277 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3278 }
3279 
3280 
3281 void
3282 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3283 {
3284 	struct mbuf *m_notify;
3285 	struct sctp_queued_to_read *control;
3286 	struct sctp_stream_change_event *stradd;
3287 	int len;
3288 
3289 	if ((stcb == NULL) ||
3290 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3291 		/* event not enabled */
3292 		return;
3293 	}
3294 	if ((stcb->asoc.peer_req_out) && flag) {
3295 		/* Peer made the request, don't tell the local user */
3296 		stcb->asoc.peer_req_out = 0;
3297 		return;
3298 	}
3299 	stcb->asoc.peer_req_out = 0;
3300 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3301 	if (m_notify == NULL)
3302 		/* no space left */
3303 		return;
3304 	SCTP_BUF_LEN(m_notify) = 0;
3305 	len = sizeof(struct sctp_stream_change_event);
3306 	if (len > M_TRAILINGSPACE(m_notify)) {
3307 		/* never enough room */
3308 		sctp_m_freem(m_notify);
3309 		return;
3310 	}
3311 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3312 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3313 	stradd->strchange_flags = flag;
3314 	stradd->strchange_length = len;
3315 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3316 	stradd->strchange_instrms = numberin;
3317 	stradd->strchange_outstrms = numberout;
3318 	SCTP_BUF_LEN(m_notify) = len;
3319 	SCTP_BUF_NEXT(m_notify) = NULL;
3320 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3321 		/* no space */
3322 		sctp_m_freem(m_notify);
3323 		return;
3324 	}
3325 	/* append to socket */
3326 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3327 	    0, 0, stcb->asoc.context, 0, 0, 0,
3328 	    m_notify);
3329 	if (control == NULL) {
3330 		/* no memory */
3331 		sctp_m_freem(m_notify);
3332 		return;
3333 	}
3334 	control->spec_flags = M_NOTIFICATION;
3335 	control->length = SCTP_BUF_LEN(m_notify);
3336 	/* not that we need this */
3337 	control->tail_mbuf = m_notify;
3338 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3339 	    control,
3340 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3341 }
3342 
3343 void
3344 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3345 {
3346 	struct mbuf *m_notify;
3347 	struct sctp_queued_to_read *control;
3348 	struct sctp_assoc_reset_event *strasoc;
3349 	int len;
3350 
3351 	if ((stcb == NULL) ||
3352 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3353 		/* event not enabled */
3354 		return;
3355 	}
3356 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3357 	if (m_notify == NULL)
3358 		/* no space left */
3359 		return;
3360 	SCTP_BUF_LEN(m_notify) = 0;
3361 	len = sizeof(struct sctp_assoc_reset_event);
3362 	if (len > M_TRAILINGSPACE(m_notify)) {
3363 		/* never enough room */
3364 		sctp_m_freem(m_notify);
3365 		return;
3366 	}
3367 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3368 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3369 	strasoc->assocreset_flags = flag;
3370 	strasoc->assocreset_length = len;
3371 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3372 	strasoc->assocreset_local_tsn = sending_tsn;
3373 	strasoc->assocreset_remote_tsn = recv_tsn;
3374 	SCTP_BUF_LEN(m_notify) = len;
3375 	SCTP_BUF_NEXT(m_notify) = NULL;
3376 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3377 		/* no space */
3378 		sctp_m_freem(m_notify);
3379 		return;
3380 	}
3381 	/* append to socket */
3382 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3383 	    0, 0, stcb->asoc.context, 0, 0, 0,
3384 	    m_notify);
3385 	if (control == NULL) {
3386 		/* no memory */
3387 		sctp_m_freem(m_notify);
3388 		return;
3389 	}
3390 	control->spec_flags = M_NOTIFICATION;
3391 	control->length = SCTP_BUF_LEN(m_notify);
3392 	/* not that we need this */
3393 	control->tail_mbuf = m_notify;
3394 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3395 	    control,
3396 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3397 }
3398 
3399 
3400 
3401 static void
3402 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3403     int number_entries, uint16_t * list, int flag)
3404 {
3405 	struct mbuf *m_notify;
3406 	struct sctp_queued_to_read *control;
3407 	struct sctp_stream_reset_event *strreset;
3408 	int len;
3409 
3410 	if ((stcb == NULL) ||
3411 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3412 		/* event not enabled */
3413 		return;
3414 	}
3415 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3416 	if (m_notify == NULL)
3417 		/* no space left */
3418 		return;
3419 	SCTP_BUF_LEN(m_notify) = 0;
3420 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3421 	if (len > M_TRAILINGSPACE(m_notify)) {
3422 		/* never enough room */
3423 		sctp_m_freem(m_notify);
3424 		return;
3425 	}
3426 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3427 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3428 	strreset->strreset_flags = flag;
3429 	strreset->strreset_length = len;
3430 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3431 	if (number_entries) {
3432 		int i;
3433 
3434 		for (i = 0; i < number_entries; i++) {
3435 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3436 		}
3437 	}
3438 	SCTP_BUF_LEN(m_notify) = len;
3439 	SCTP_BUF_NEXT(m_notify) = NULL;
3440 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3441 		/* no space */
3442 		sctp_m_freem(m_notify);
3443 		return;
3444 	}
3445 	/* append to socket */
3446 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3447 	    0, 0, stcb->asoc.context, 0, 0, 0,
3448 	    m_notify);
3449 	if (control == NULL) {
3450 		/* no memory */
3451 		sctp_m_freem(m_notify);
3452 		return;
3453 	}
3454 	control->spec_flags = M_NOTIFICATION;
3455 	control->length = SCTP_BUF_LEN(m_notify);
3456 	/* not that we need this */
3457 	control->tail_mbuf = m_notify;
3458 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3459 	    control,
3460 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3461 }
3462 
3463 
/*
 * Queue an SCTP_REMOTE_ERROR notification for an ERROR chunk received
 * from the peer.  If memory allows, the raw error chunk is appended
 * after the notification header so the application can inspect it;
 * otherwise only the header is delivered.
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	size_t notif_len, chunk_len;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		/* event not enabled */
		return;
	}
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
	} else {
		chunk_len = 0;
	}
	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		notif_len = sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	/*
	 * notif_len exceeds the bare header only when the full-size
	 * allocation (header + chunk) above succeeded, so chunk is
	 * non-NULL and the copy fits.
	 */
	if (notif_len > sizeof(struct sctp_remote_error)) {
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		/* not that we need this */
		control->tail_mbuf = m_notify;
		control->spec_flags = M_NOTIFICATION;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		/* no memory for the read-queue entry */
		sctp_m_freem(m_notify);
	}
}
3519 
3520 
3521 void
3522 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3523     uint32_t error, void *data, int so_locked
3524 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3525     SCTP_UNUSED
3526 #endif
3527 )
3528 {
3529 	if ((stcb == NULL) ||
3530 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3531 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3532 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3533 		/* If the socket is gone we are out of here */
3534 		return;
3535 	}
3536 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3537 		return;
3538 	}
3539 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3540 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3541 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3542 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3543 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3544 			/* Don't report these in front states */
3545 			return;
3546 		}
3547 	}
3548 	switch (notification) {
3549 	case SCTP_NOTIFY_ASSOC_UP:
3550 		if (stcb->asoc.assoc_up_sent == 0) {
3551 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3552 			stcb->asoc.assoc_up_sent = 1;
3553 		}
3554 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3555 			sctp_notify_adaptation_layer(stcb);
3556 		}
3557 		if (stcb->asoc.peer_supports_auth == 0) {
3558 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3559 			    NULL, so_locked);
3560 		}
3561 		break;
3562 	case SCTP_NOTIFY_ASSOC_DOWN:
3563 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3564 		break;
3565 	case SCTP_NOTIFY_INTERFACE_DOWN:
3566 		{
3567 			struct sctp_nets *net;
3568 
3569 			net = (struct sctp_nets *)data;
3570 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3571 			    (struct sockaddr *)&net->ro._l_addr, error);
3572 			break;
3573 		}
3574 	case SCTP_NOTIFY_INTERFACE_UP:
3575 		{
3576 			struct sctp_nets *net;
3577 
3578 			net = (struct sctp_nets *)data;
3579 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3580 			    (struct sockaddr *)&net->ro._l_addr, error);
3581 			break;
3582 		}
3583 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3584 		{
3585 			struct sctp_nets *net;
3586 
3587 			net = (struct sctp_nets *)data;
3588 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3589 			    (struct sockaddr *)&net->ro._l_addr, error);
3590 			break;
3591 		}
3592 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3593 		sctp_notify_send_failed2(stcb, error,
3594 		    (struct sctp_stream_queue_pending *)data, so_locked);
3595 		break;
3596 	case SCTP_NOTIFY_SENT_DG_FAIL:
3597 		sctp_notify_send_failed(stcb, 1, error,
3598 		    (struct sctp_tmit_chunk *)data, so_locked);
3599 		break;
3600 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3601 		sctp_notify_send_failed(stcb, 0, error,
3602 		    (struct sctp_tmit_chunk *)data, so_locked);
3603 		break;
3604 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3605 		{
3606 			uint32_t val;
3607 
3608 			val = *((uint32_t *) data);
3609 
3610 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3611 			break;
3612 		}
3613 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3614 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3615 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3616 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3617 		} else {
3618 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3619 		}
3620 		break;
3621 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3622 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3623 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3624 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3625 		} else {
3626 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3627 		}
3628 		break;
3629 	case SCTP_NOTIFY_ASSOC_RESTART:
3630 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3631 		if (stcb->asoc.peer_supports_auth == 0) {
3632 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3633 			    NULL, so_locked);
3634 		}
3635 		break;
3636 	case SCTP_NOTIFY_STR_RESET_SEND:
3637 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3638 		break;
3639 	case SCTP_NOTIFY_STR_RESET_RECV:
3640 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3641 		break;
3642 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3643 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3644 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3645 		break;
3646 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3647 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3648 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3649 		break;
3650 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3651 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3652 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3653 		break;
3654 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3655 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3656 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3657 		break;
3658 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3659 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3660 		    error);
3661 		break;
3662 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3663 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3664 		    error);
3665 		break;
3666 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3667 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3668 		    error);
3669 		break;
3670 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3671 		sctp_notify_shutdown_event(stcb);
3672 		break;
3673 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3674 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3675 		    (uint16_t) (uintptr_t) data,
3676 		    so_locked);
3677 		break;
3678 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3679 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3680 		    (uint16_t) (uintptr_t) data,
3681 		    so_locked);
3682 		break;
3683 	case SCTP_NOTIFY_NO_PEER_AUTH:
3684 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3685 		    (uint16_t) (uintptr_t) data,
3686 		    so_locked);
3687 		break;
3688 	case SCTP_NOTIFY_SENDER_DRY:
3689 		sctp_notify_sender_dry_event(stcb, so_locked);
3690 		break;
3691 	case SCTP_NOTIFY_REMOTE_ERROR:
3692 		sctp_notify_remote_error(stcb, error, data);
3693 		break;
3694 	default:
3695 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3696 		    __FUNCTION__, notification, notification);
3697 		break;
3698 	}			/* end switch */
3699 }
3700 
/*
 * Flush every outbound queue of the association (sent queue, send
 * queue, and each stream's pending-message queue), issuing a
 * send-failed notification for each piece of user data dropped.
 * Used when the association is being torn down.  'holds_lock' is
 * non-zero when the caller already owns the TCB send lock.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		/* nobody left to notify */
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* keep the per-stream outstanding-chunk count honest */
			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* the notify may have stolen chk->data; re-check */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* the notify may have stolen chk->data; re-check */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* the notify may have stolen sp->data; re-check */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3812 
3813 void
3814 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3815     struct sctp_abort_chunk *abort, int so_locked
3816 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3817     SCTP_UNUSED
3818 #endif
3819 )
3820 {
3821 	if (stcb == NULL) {
3822 		return;
3823 	}
3824 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3825 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3826 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3827 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3828 	}
3829 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3830 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3831 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3832 		return;
3833 	}
3834 	/* Tell them we lost the asoc */
3835 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3836 	if (from_peer) {
3837 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3838 	} else {
3839 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3840 	}
3841 }
3842 
/*
 * Abort an association: notify the ULP (if a TCB exists), transmit an
 * ABORT with the supplied operational error, and free the TCB.  On
 * return the stcb, if it was non-NULL, has been freed and its lock
 * released.  'vrf_id' is overridden by the association's own VRF when
 * a TCB is present.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    use_mflowid, mflowid,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: hold a refcount, drop the TCB lock,
		 * take the socket lock, then re-take the TCB lock.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* only established associations count toward currestab */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3891 
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the circular in/out TSN logs kept in the asoc.
 * Each log is a ring buffer; when wrapped, entries from the current
 * index to the end are printed first, then the entries before the index.
 *
 * NOTE(review): the guard below reads NOSIY_PRINTS, which looks like a
 * typo for NOISY_PRINTS — as written, the body is dead code unless that
 * exact misspelled macro is defined.  Confirm intent before renaming.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* wrapped: print the older entries (index..end) first */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	/*
	 * NOTE(review): unlike the inbound branch above, "None sent" does
	 * not skip the loops below — harmless (the loops print nothing
	 * when both counters are zero), but inconsistent.
	 */
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3955 
/*
 * Abort an existing association from our side: notify the ULP, send an
 * ABORT chunk to the peer (op_err, if any, is consumed), update stats,
 * and free the TCB.  With stcb == NULL only a pending inpcb free is
 * completed.  so_locked indicates whether the caller already holds the
 * socket lock (relevant on platforms that take it here).
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			/* Socket is gone and no assocs remain: finish the free. */
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* Only established assocs count against currestab. */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock ordering: socket lock before TCB lock.  Hold a refcount
	 * across the unlock window so the assoc cannot be freed under us.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4016 
/*
 * Handle an out-of-the-blue (OOTB) packet, i.e. one for which no
 * association exists (RFC 4960, section 8.4).  Walks the chunks to
 * decide the response: silence for PACKET-DROPPED/ABORT/SHUTDOWN-
 * COMPLETE, a SHUTDOWN-COMPLETE for SHUTDOWN-ACK, otherwise an ABORT
 * (subject to the sctp_blackhole sysctl).
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* Endpoint is being torn down; finish the free if possible. */
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	/* Walk every chunk in the packet (copied out via chunk_buf). */
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			/* remembered: sctp_blackhole == 1 suppresses the ABORT */
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    use_mflowid, mflowid,
			    vrf_id, port);
			return;
		default:
			break;
		}
		/* chunks are padded to 4-byte boundaries */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/* sctp_blackhole: 0 = always abort, 1 = stay silent on INIT, 2 = silent */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
		    use_mflowid, mflowid,
		    vrf_id, port);
	}
}
4082 
4083 /*
4084  * check the inbound datagram to make sure there is not an abort inside it,
4085  * if there is return 1, else return 0.
4086  */
4087 int
4088 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4089 {
4090 	struct sctp_chunkhdr *ch;
4091 	struct sctp_init_chunk *init_chk, chunk_buf;
4092 	int offset;
4093 	unsigned int chk_length;
4094 
4095 	offset = iphlen + sizeof(struct sctphdr);
4096 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4097 	    (uint8_t *) & chunk_buf);
4098 	while (ch != NULL) {
4099 		chk_length = ntohs(ch->chunk_length);
4100 		if (chk_length < sizeof(*ch)) {
4101 			/* packet is probably corrupt */
4102 			break;
4103 		}
4104 		/* we seem to be ok, is it an abort? */
4105 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4106 			/* yep, tell them */
4107 			return (1);
4108 		}
4109 		if (ch->chunk_type == SCTP_INITIATION) {
4110 			/* need to update the Vtag */
4111 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4112 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4113 			if (init_chk != NULL) {
4114 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4115 			}
4116 		}
4117 		/* Nope, move to the next chunk */
4118 		offset += SCTP_SIZE32(chk_length);
4119 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4120 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4121 	}
4122 	return (0);
4123 }
4124 
4125 /*
4126  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4127  * set (i.e. it's 0) so, create this function to compare link local scopes
4128  */
4129 #ifdef INET6
/*
 * Compare the link-local scopes of two IPv6 addresses.  Works on local
 * copies so embedded scope ids can be recovered without touching the
 * callers' structures.  Returns 1 when the scopes match, 0 otherwise
 * (including when a scope cannot be recovered).
 */
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
	struct sockaddr_in6 a, b;

	/* save copies */
	a = *addr1;
	b = *addr2;

	/* recover missing scope ids; failure means we can't compare */
	if ((a.sin6_scope_id == 0) && sa6_recoverscope(&a)) {
		return (0);
	}
	if ((b.sin6_scope_id == 0) && sa6_recoverscope(&b)) {
		return (0);
	}
	return ((a.sin6_scope_id == b.sin6_scope_id) ? 1 : 0);
}
4154 
4155 /*
4156  * returns a sockaddr_in6 with embedded scope recovered and removed
4157  */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* Only IPv6 link-local addresses carry embedded scope junk. */
	if (addr->sin6_family != AF_INET6) {
		return (addr);
	}
	if (!IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
		return (addr);
	}
	if (addr->sin6_scope_id == 0) {
		/* recover the scope into the caller-supplied storage */
		*store = *addr;
		if (!sa6_recoverscope(store)) {
			/* use the recovered scope */
			addr = store;
		}
	} else {
		/* scope id already set; strip the embedded copy */
		in6_clearscope(&addr->sin6_addr);
	}
	return (addr);
}
4178 
4179 #endif
4180 
4181 /*
4182  * are the two addresses the same?  currently a "scopeless" check returns: 1
4183  * if same, 0 if not
4184  */
4185 int
4186 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4187 {
4188 
4189 	/* must be valid */
4190 	if (sa1 == NULL || sa2 == NULL)
4191 		return (0);
4192 
4193 	/* must be the same family */
4194 	if (sa1->sa_family != sa2->sa_family)
4195 		return (0);
4196 
4197 	switch (sa1->sa_family) {
4198 #ifdef INET6
4199 	case AF_INET6:
4200 		{
4201 			/* IPv6 addresses */
4202 			struct sockaddr_in6 *sin6_1, *sin6_2;
4203 
4204 			sin6_1 = (struct sockaddr_in6 *)sa1;
4205 			sin6_2 = (struct sockaddr_in6 *)sa2;
4206 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4207 			    sin6_2));
4208 		}
4209 #endif
4210 #ifdef INET
4211 	case AF_INET:
4212 		{
4213 			/* IPv4 addresses */
4214 			struct sockaddr_in *sin_1, *sin_2;
4215 
4216 			sin_1 = (struct sockaddr_in *)sa1;
4217 			sin_2 = (struct sockaddr_in *)sa2;
4218 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4219 		}
4220 #endif
4221 	default:
4222 		/* we don't do these... */
4223 		return (0);
4224 	}
4225 }
4226 
/* Debug helper: print a sockaddr (IPv4 or IPv6) in human-readable form. */
void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];

#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;

			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
			break;
		}
#endif
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin = (struct sockaddr_in *)sa;
			unsigned char *bytes = (unsigned char *)&sin->sin_addr;

			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
			    bytes[0], bytes[1], bytes[2], bytes[3],
			    ntohs(sin->sin_port));
			break;
		}
#endif
	default:
		SCTP_PRINTF("?\n");
		break;
	}
}
4267 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* Serialize against readers on the old socket's receive buffer. */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* Debit the old socket buffer for each mbuf moved. */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* Credit the new socket buffer for each mbuf moved. */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4343 
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Reader is gone: drop the data instead of queueing it. */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* Only real data (not notifications) counts toward recv stats. */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* Walk the chain: drop zero-length mbufs, book the rest into sb. */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		/* message is complete; reader may consume past it */
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* Finally, wake up any reader waiting on the socket. */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/* socket lock before TCB lock; hold a ref meanwhile */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4469 
4470 
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* Reader gone: silently discard (m is leaked to the caller's eye,
		 * but the caller must not reuse it; returning 0 keeps old behavior). */
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* Walk the new chain: drop empty mbufs, tally length, book into sb. */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* Wake any reader waiting on the socket. */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/* socket lock before TCB lock; hold a ref meanwhile */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4620 
4621 
4622 
4623 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4624  *************ALTERNATE ROUTING CODE
4625  */
4626 
4627 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4628  *************ALTERNATE ROUTING CODE
4629  */
4630 
4631 struct mbuf *
4632 sctp_generate_cause(uint16_t code, char *info)
4633 {
4634 	struct mbuf *m;
4635 	struct sctp_gen_error_cause *cause;
4636 	size_t info_len, len;
4637 
4638 	if ((code == 0) || (info == NULL)) {
4639 		return (NULL);
4640 	}
4641 	info_len = strlen(info);
4642 	len = sizeof(struct sctp_paramhdr) + info_len;
4643 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4644 	if (m != NULL) {
4645 		SCTP_BUF_LEN(m) = len;
4646 		cause = mtod(m, struct sctp_gen_error_cause *);
4647 		cause->code = htons(code);
4648 		cause->length = htons((uint16_t) len);
4649 		memcpy(cause->info, info, info_len);
4650 	}
4651 	return (m);
4652 }
4653 
4654 struct mbuf *
4655 sctp_generate_no_user_data_cause(uint32_t tsn)
4656 {
4657 	struct mbuf *m;
4658 	struct sctp_error_no_user_data *no_user_data_cause;
4659 	size_t len;
4660 
4661 	len = sizeof(struct sctp_error_no_user_data);
4662 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4663 	if (m != NULL) {
4664 		SCTP_BUF_LEN(m) = len;
4665 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4666 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4667 		no_user_data_cause->cause.length = htons((uint16_t) len);
4668 		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4669 	}
4670 	return (m);
4671 }
4672 
#ifdef SCTP_MBCNT_LOGGING
/*
 * Return the buffer space booked by tp1 to the association (and, for
 * one-to-one style sockets, to the socket send buffer), clamping both
 * counters at zero.  chk_cnt chunks are debited from the outqueue count.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	/* Nothing booked if the chunk carries no data. */
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* Debit the booked size from the assoc total, clamped at zero. */
	if (asoc->total_output_queue_size < tp1->book_size) {
		asoc->total_output_queue_size = 0;
	} else {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	}
	/* One-to-one style sockets also track send-buffer occupancy. */
	if ((stcb->sctp_socket != NULL) &&
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))) {
		if (stcb->sctp_socket->so_snd.sb_cc < tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc = 0;
		} else {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		}
	}
}

#endif
4707 
4708 int
4709 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4710     uint8_t sent, int so_locked
4711 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4712     SCTP_UNUSED
4713 #endif
4714 )
4715 {
4716 	struct sctp_stream_out *strq;
4717 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4718 	struct sctp_stream_queue_pending *sp;
4719 	uint16_t stream = 0, seq = 0;
4720 	uint8_t foundeom = 0;
4721 	int ret_sz = 0;
4722 	int notdone;
4723 	int do_wakeup_routine = 0;
4724 
4725 	stream = tp1->rec.data.stream_number;
4726 	seq = tp1->rec.data.stream_seq;
4727 	do {
4728 		ret_sz += tp1->book_size;
4729 		if (tp1->data != NULL) {
4730 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4731 				sctp_flight_size_decrease(tp1);
4732 				sctp_total_flight_decrease(stcb, tp1);
4733 			}
4734 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4735 			stcb->asoc.peers_rwnd += tp1->send_size;
4736 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4737 			if (sent) {
4738 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4739 			} else {
4740 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4741 			}
4742 			if (tp1->data) {
4743 				sctp_m_freem(tp1->data);
4744 				tp1->data = NULL;
4745 			}
4746 			do_wakeup_routine = 1;
4747 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4748 				stcb->asoc.sent_queue_cnt_removeable--;
4749 			}
4750 		}
4751 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4752 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4753 		    SCTP_DATA_NOT_FRAG) {
4754 			/* not frag'ed we ae done   */
4755 			notdone = 0;
4756 			foundeom = 1;
4757 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4758 			/* end of frag, we are done */
4759 			notdone = 0;
4760 			foundeom = 1;
4761 		} else {
4762 			/*
4763 			 * Its a begin or middle piece, we must mark all of
4764 			 * it
4765 			 */
4766 			notdone = 1;
4767 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4768 		}
4769 	} while (tp1 && notdone);
4770 	if (foundeom == 0) {
4771 		/*
4772 		 * The multi-part message was scattered across the send and
4773 		 * sent queue.
4774 		 */
4775 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4776 			if ((tp1->rec.data.stream_number != stream) ||
4777 			    (tp1->rec.data.stream_seq != seq)) {
4778 				break;
4779 			}
4780 			/*
4781 			 * save to chk in case we have some on stream out
4782 			 * queue. If so and we have an un-transmitted one we
4783 			 * don't have to fudge the TSN.
4784 			 */
4785 			chk = tp1;
4786 			ret_sz += tp1->book_size;
4787 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4788 			if (sent) {
4789 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4790 			} else {
4791 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4792 			}
4793 			if (tp1->data) {
4794 				sctp_m_freem(tp1->data);
4795 				tp1->data = NULL;
4796 			}
4797 			/* No flight involved here book the size to 0 */
4798 			tp1->book_size = 0;
4799 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4800 				foundeom = 1;
4801 			}
4802 			do_wakeup_routine = 1;
4803 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4804 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4805 			/*
4806 			 * on to the sent queue so we can wait for it to be
4807 			 * passed by.
4808 			 */
4809 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4810 			    sctp_next);
4811 			stcb->asoc.send_queue_cnt--;
4812 			stcb->asoc.sent_queue_cnt++;
4813 		}
4814 	}
4815 	if (foundeom == 0) {
4816 		/*
4817 		 * Still no eom found. That means there is stuff left on the
4818 		 * stream out queue.. yuck.
4819 		 */
4820 		SCTP_TCB_SEND_LOCK(stcb);
4821 		strq = &stcb->asoc.strmout[stream];
4822 		sp = TAILQ_FIRST(&strq->outqueue);
4823 		if (sp != NULL) {
4824 			sp->discard_rest = 1;
4825 			/*
4826 			 * We may need to put a chunk on the queue that
4827 			 * holds the TSN that would have been sent with the
4828 			 * LAST bit.
4829 			 */
4830 			if (chk == NULL) {
4831 				/* Yep, we have to */
4832 				sctp_alloc_a_chunk(stcb, chk);
4833 				if (chk == NULL) {
4834 					/*
4835 					 * we are hosed. All we can do is
4836 					 * nothing.. which will cause an
4837 					 * abort if the peer is paying
4838 					 * attention.
4839 					 */
4840 					goto oh_well;
4841 				}
4842 				memset(chk, 0, sizeof(*chk));
4843 				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4844 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4845 				chk->asoc = &stcb->asoc;
4846 				chk->rec.data.stream_seq = strq->next_sequence_send;
4847 				chk->rec.data.stream_number = sp->stream;
4848 				chk->rec.data.payloadtype = sp->ppid;
4849 				chk->rec.data.context = sp->context;
4850 				chk->flags = sp->act_flags;
4851 				if (sp->net)
4852 					chk->whoTo = sp->net;
4853 				else
4854 					chk->whoTo = stcb->asoc.primary_destination;
4855 				atomic_add_int(&chk->whoTo->ref_count, 1);
4856 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4857 				stcb->asoc.pr_sctp_cnt++;
4858 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4859 				stcb->asoc.sent_queue_cnt++;
4860 				stcb->asoc.pr_sctp_cnt++;
4861 			} else {
4862 				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4863 			}
4864 			strq->next_sequence_send++;
4865 	oh_well:
4866 			if (sp->data) {
4867 				/*
4868 				 * Pull any data to free up the SB and allow
4869 				 * sender to "add more" while we will throw
4870 				 * away :-)
4871 				 */
4872 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4873 				ret_sz += sp->length;
4874 				do_wakeup_routine = 1;
4875 				sp->some_taken = 1;
4876 				sctp_m_freem(sp->data);
4877 				sp->data = NULL;
4878 				sp->tail_mbuf = NULL;
4879 				sp->length = 0;
4880 			}
4881 		}
4882 		SCTP_TCB_SEND_UNLOCK(stcb);
4883 	}
4884 	if (do_wakeup_routine) {
4885 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4886 		struct socket *so;
4887 
4888 		so = SCTP_INP_SO(stcb->sctp_ep);
4889 		if (!so_locked) {
4890 			atomic_add_int(&stcb->asoc.refcnt, 1);
4891 			SCTP_TCB_UNLOCK(stcb);
4892 			SCTP_SOCKET_LOCK(so, 1);
4893 			SCTP_TCB_LOCK(stcb);
4894 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4895 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4896 				/* assoc was freed while we were unlocked */
4897 				SCTP_SOCKET_UNLOCK(so, 1);
4898 				return (ret_sz);
4899 			}
4900 		}
4901 #endif
4902 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4903 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4904 		if (!so_locked) {
4905 			SCTP_SOCKET_UNLOCK(so, 1);
4906 		}
4907 #endif
4908 	}
4909 	return (ret_sz);
4910 }
4911 
/*
 * Checks to see if the given address, sa, is one that is currently known
 * by the kernel.
 * Note: can't distinguish the same address on multiple interfaces and
 * doesn't handle multiple addresses with different zone/scope id's.
 * Note: ifa_ifwithaddr() compares the entire sockaddr struct.
 */
4918 struct sctp_ifa *
4919 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4920     int holds_lock)
4921 {
4922 	struct sctp_laddr *laddr;
4923 
4924 	if (holds_lock == 0) {
4925 		SCTP_INP_RLOCK(inp);
4926 	}
4927 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4928 		if (laddr->ifa == NULL)
4929 			continue;
4930 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4931 			continue;
4932 #ifdef INET
4933 		if (addr->sa_family == AF_INET) {
4934 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4935 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4936 				/* found him. */
4937 				if (holds_lock == 0) {
4938 					SCTP_INP_RUNLOCK(inp);
4939 				}
4940 				return (laddr->ifa);
4941 				break;
4942 			}
4943 		}
4944 #endif
4945 #ifdef INET6
4946 		if (addr->sa_family == AF_INET6) {
4947 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4948 			    &laddr->ifa->address.sin6)) {
4949 				/* found him. */
4950 				if (holds_lock == 0) {
4951 					SCTP_INP_RUNLOCK(inp);
4952 				}
4953 				return (laddr->ifa);
4954 				break;
4955 			}
4956 		}
4957 #endif
4958 	}
4959 	if (holds_lock == 0) {
4960 		SCTP_INP_RUNLOCK(inp);
4961 	}
4962 	return (NULL);
4963 }
4964 
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	/*
	 * Derive a 32-bit hash value from an address for bucket lookup.
	 * Only the address bytes contribute (not the port); unsupported
	 * or unknown address families hash to 0.
	 */
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			uint32_t v;

			v = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
			/* Fold the upper half into the lower half. */
			return (v ^ (v >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t sum;
			int i;

			sin6 = (struct sockaddr_in6 *)addr;
			/* Sum the four 32-bit words of the v6 address. */
			sum = 0;
			for (i = 0; i < 4; i++) {
				sum += sin6->sin6_addr.s6_addr32[i];
			}
			return (sum ^ (sum >> 16));
		}
#endif
	default:
		break;
	}
	return (0);
}
4998 
4999 struct sctp_ifa *
5000 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5001 {
5002 	struct sctp_ifa *sctp_ifap;
5003 	struct sctp_vrf *vrf;
5004 	struct sctp_ifalist *hash_head;
5005 	uint32_t hash_of_addr;
5006 
5007 	if (holds_lock == 0)
5008 		SCTP_IPI_ADDR_RLOCK();
5009 
5010 	vrf = sctp_find_vrf(vrf_id);
5011 	if (vrf == NULL) {
5012 stage_right:
5013 		if (holds_lock == 0)
5014 			SCTP_IPI_ADDR_RUNLOCK();
5015 		return (NULL);
5016 	}
5017 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5018 
5019 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5020 	if (hash_head == NULL) {
5021 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5022 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5023 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5024 		sctp_print_address(addr);
5025 		SCTP_PRINTF("No such bucket for address\n");
5026 		if (holds_lock == 0)
5027 			SCTP_IPI_ADDR_RUNLOCK();
5028 
5029 		return (NULL);
5030 	}
5031 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5032 		if (sctp_ifap == NULL) {
5033 #ifdef INVARIANTS
5034 			panic("Huh LIST_FOREACH corrupt");
5035 			goto stage_right;
5036 #else
5037 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
5038 			goto stage_right;
5039 #endif
5040 		}
5041 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5042 			continue;
5043 #ifdef INET
5044 		if (addr->sa_family == AF_INET) {
5045 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5046 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5047 				/* found him. */
5048 				if (holds_lock == 0)
5049 					SCTP_IPI_ADDR_RUNLOCK();
5050 				return (sctp_ifap);
5051 				break;
5052 			}
5053 		}
5054 #endif
5055 #ifdef INET6
5056 		if (addr->sa_family == AF_INET6) {
5057 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5058 			    &sctp_ifap->address.sin6)) {
5059 				/* found him. */
5060 				if (holds_lock == 0)
5061 					SCTP_IPI_ADDR_RUNLOCK();
5062 				return (sctp_ifap);
5063 				break;
5064 			}
5065 		}
5066 #endif
5067 	}
5068 	if (holds_lock == 0)
5069 		SCTP_IPI_ADDR_RUNLOCK();
5070 	return (NULL);
5071 }
5072 
/*
 * Called after the application has pulled data off the receive socket
 * buffer.  Decides whether enough receive window has opened (at least
 * rwnd_req bytes since the last report) to justify sending an immediate
 * window-update SACK, and if so sends it and kicks chunk output.
 *
 * stcb         - the association; a refcount is taken for the duration
 *                of the call so it cannot be freed underneath us.
 * freed_so_far - in/out: bytes freed since the caller last called in;
 *                folded into stcb->freed_by_sorcv_sincelast and zeroed.
 * hold_rlock   - non-zero if the caller holds the INP read-queue lock;
 *                it is dropped before locking the TCB and re-acquired
 *                on the way out.
 * rwnd_req     - byte threshold that triggers the window update.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the association while we work on it. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed-byte count into the running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * Window has grown by at least rwnd_req since the last
		 * report: send a window-update SACK now.
		 */
		if (hold_rlock) {
			/*
			 * Drop the read-queue lock before taking the TCB
			 * lock (lock ordering); re-acquired at "out".
			 */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock: freeing may have started. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-take the read-queue lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5154 
5155 int
5156 sctp_sorecvmsg(struct socket *so,
5157     struct uio *uio,
5158     struct mbuf **mp,
5159     struct sockaddr *from,
5160     int fromlen,
5161     int *msg_flags,
5162     struct sctp_sndrcvinfo *sinfo,
5163     int filling_sinfo)
5164 {
5165 	/*
5166 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5167 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5168 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5169 	 * On the way out we may send out any combination of:
5170 	 * MSG_NOTIFICATION MSG_EOR
5171 	 *
5172 	 */
5173 	struct sctp_inpcb *inp = NULL;
5174 	int my_len = 0;
5175 	int cp_len = 0, error = 0;
5176 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5177 	struct mbuf *m = NULL;
5178 	struct sctp_tcb *stcb = NULL;
5179 	int wakeup_read_socket = 0;
5180 	int freecnt_applied = 0;
5181 	int out_flags = 0, in_flags = 0;
5182 	int block_allowed = 1;
5183 	uint32_t freed_so_far = 0;
5184 	uint32_t copied_so_far = 0;
5185 	int in_eeor_mode = 0;
5186 	int no_rcv_needed = 0;
5187 	uint32_t rwnd_req = 0;
5188 	int hold_sblock = 0;
5189 	int hold_rlock = 0;
5190 	int slen = 0;
5191 	uint32_t held_length = 0;
5192 	int sockbuf_lock = 0;
5193 
5194 	if (uio == NULL) {
5195 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5196 		return (EINVAL);
5197 	}
5198 	if (msg_flags) {
5199 		in_flags = *msg_flags;
5200 		if (in_flags & MSG_PEEK)
5201 			SCTP_STAT_INCR(sctps_read_peeks);
5202 	} else {
5203 		in_flags = 0;
5204 	}
5205 	slen = uio->uio_resid;
5206 
5207 	/* Pull in and set up our int flags */
5208 	if (in_flags & MSG_OOB) {
5209 		/* Out of band's NOT supported */
5210 		return (EOPNOTSUPP);
5211 	}
5212 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5213 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5214 		return (EINVAL);
5215 	}
5216 	if ((in_flags & (MSG_DONTWAIT
5217 	    | MSG_NBIO
5218 	    )) ||
5219 	    SCTP_SO_IS_NBIO(so)) {
5220 		block_allowed = 0;
5221 	}
5222 	/* setup the endpoint */
5223 	inp = (struct sctp_inpcb *)so->so_pcb;
5224 	if (inp == NULL) {
5225 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5226 		return (EFAULT);
5227 	}
5228 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5229 	/* Must be at least a MTU's worth */
5230 	if (rwnd_req < SCTP_MIN_RWND)
5231 		rwnd_req = SCTP_MIN_RWND;
5232 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5233 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5234 		sctp_misc_ints(SCTP_SORECV_ENTER,
5235 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5236 	}
5237 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5238 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5239 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5240 	}
5241 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5242 	if (error) {
5243 		goto release_unlocked;
5244 	}
5245 	sockbuf_lock = 1;
5246 restart:
5247 
5248 
5249 restart_nosblocks:
5250 	if (hold_sblock == 0) {
5251 		SOCKBUF_LOCK(&so->so_rcv);
5252 		hold_sblock = 1;
5253 	}
5254 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5255 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5256 		goto out;
5257 	}
5258 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5259 		if (so->so_error) {
5260 			error = so->so_error;
5261 			if ((in_flags & MSG_PEEK) == 0)
5262 				so->so_error = 0;
5263 			goto out;
5264 		} else {
5265 			if (so->so_rcv.sb_cc == 0) {
5266 				/* indicate EOF */
5267 				error = 0;
5268 				goto out;
5269 			}
5270 		}
5271 	}
5272 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5273 		/* we need to wait for data */
5274 		if ((so->so_rcv.sb_cc == 0) &&
5275 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5276 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5277 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5278 				/*
5279 				 * For active open side clear flags for
5280 				 * re-use passive open is blocked by
5281 				 * connect.
5282 				 */
5283 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5284 					/*
5285 					 * You were aborted, passive side
5286 					 * always hits here
5287 					 */
5288 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5289 					error = ECONNRESET;
5290 				}
5291 				so->so_state &= ~(SS_ISCONNECTING |
5292 				    SS_ISDISCONNECTING |
5293 				    SS_ISCONFIRMING |
5294 				    SS_ISCONNECTED);
5295 				if (error == 0) {
5296 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5297 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5298 						error = ENOTCONN;
5299 					}
5300 				}
5301 				goto out;
5302 			}
5303 		}
5304 		error = sbwait(&so->so_rcv);
5305 		if (error) {
5306 			goto out;
5307 		}
5308 		held_length = 0;
5309 		goto restart_nosblocks;
5310 	} else if (so->so_rcv.sb_cc == 0) {
5311 		if (so->so_error) {
5312 			error = so->so_error;
5313 			if ((in_flags & MSG_PEEK) == 0)
5314 				so->so_error = 0;
5315 		} else {
5316 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5317 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5318 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5319 					/*
5320 					 * For active open side clear flags
5321 					 * for re-use passive open is
5322 					 * blocked by connect.
5323 					 */
5324 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5325 						/*
5326 						 * You were aborted, passive
5327 						 * side always hits here
5328 						 */
5329 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5330 						error = ECONNRESET;
5331 					}
5332 					so->so_state &= ~(SS_ISCONNECTING |
5333 					    SS_ISDISCONNECTING |
5334 					    SS_ISCONFIRMING |
5335 					    SS_ISCONNECTED);
5336 					if (error == 0) {
5337 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5338 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5339 							error = ENOTCONN;
5340 						}
5341 					}
5342 					goto out;
5343 				}
5344 			}
5345 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5346 			error = EWOULDBLOCK;
5347 		}
5348 		goto out;
5349 	}
5350 	if (hold_sblock == 1) {
5351 		SOCKBUF_UNLOCK(&so->so_rcv);
5352 		hold_sblock = 0;
5353 	}
5354 	/* we possibly have data we can read */
5355 	/* sa_ignore FREED_MEMORY */
5356 	control = TAILQ_FIRST(&inp->read_queue);
5357 	if (control == NULL) {
5358 		/*
5359 		 * This could be happening since the appender did the
5360 		 * increment but as not yet did the tailq insert onto the
5361 		 * read_queue
5362 		 */
5363 		if (hold_rlock == 0) {
5364 			SCTP_INP_READ_LOCK(inp);
5365 		}
5366 		control = TAILQ_FIRST(&inp->read_queue);
5367 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5368 #ifdef INVARIANTS
5369 			panic("Huh, its non zero and nothing on control?");
5370 #endif
5371 			so->so_rcv.sb_cc = 0;
5372 		}
5373 		SCTP_INP_READ_UNLOCK(inp);
5374 		hold_rlock = 0;
5375 		goto restart;
5376 	}
5377 	if ((control->length == 0) &&
5378 	    (control->do_not_ref_stcb)) {
5379 		/*
5380 		 * Clean up code for freeing assoc that left behind a
5381 		 * pdapi.. maybe a peer in EEOR that just closed after
5382 		 * sending and never indicated a EOR.
5383 		 */
5384 		if (hold_rlock == 0) {
5385 			hold_rlock = 1;
5386 			SCTP_INP_READ_LOCK(inp);
5387 		}
5388 		control->held_length = 0;
5389 		if (control->data) {
5390 			/* Hmm there is data here .. fix */
5391 			struct mbuf *m_tmp;
5392 			int cnt = 0;
5393 
5394 			m_tmp = control->data;
5395 			while (m_tmp) {
5396 				cnt += SCTP_BUF_LEN(m_tmp);
5397 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5398 					control->tail_mbuf = m_tmp;
5399 					control->end_added = 1;
5400 				}
5401 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5402 			}
5403 			control->length = cnt;
5404 		} else {
5405 			/* remove it */
5406 			TAILQ_REMOVE(&inp->read_queue, control, next);
5407 			/* Add back any hiddend data */
5408 			sctp_free_remote_addr(control->whoFrom);
5409 			sctp_free_a_readq(stcb, control);
5410 		}
5411 		if (hold_rlock) {
5412 			hold_rlock = 0;
5413 			SCTP_INP_READ_UNLOCK(inp);
5414 		}
5415 		goto restart;
5416 	}
5417 	if ((control->length == 0) &&
5418 	    (control->end_added == 1)) {
5419 		/*
5420 		 * Do we also need to check for (control->pdapi_aborted ==
5421 		 * 1)?
5422 		 */
5423 		if (hold_rlock == 0) {
5424 			hold_rlock = 1;
5425 			SCTP_INP_READ_LOCK(inp);
5426 		}
5427 		TAILQ_REMOVE(&inp->read_queue, control, next);
5428 		if (control->data) {
5429 #ifdef INVARIANTS
5430 			panic("control->data not null but control->length == 0");
5431 #else
5432 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5433 			sctp_m_freem(control->data);
5434 			control->data = NULL;
5435 #endif
5436 		}
5437 		if (control->aux_data) {
5438 			sctp_m_free(control->aux_data);
5439 			control->aux_data = NULL;
5440 		}
5441 		sctp_free_remote_addr(control->whoFrom);
5442 		sctp_free_a_readq(stcb, control);
5443 		if (hold_rlock) {
5444 			hold_rlock = 0;
5445 			SCTP_INP_READ_UNLOCK(inp);
5446 		}
5447 		goto restart;
5448 	}
5449 	if (control->length == 0) {
5450 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5451 		    (filling_sinfo)) {
5452 			/* find a more suitable one then this */
5453 			ctl = TAILQ_NEXT(control, next);
5454 			while (ctl) {
5455 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5456 				    (ctl->some_taken ||
5457 				    (ctl->spec_flags & M_NOTIFICATION) ||
5458 				    ((ctl->do_not_ref_stcb == 0) &&
5459 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5460 				    ) {
5461 					/*-
5462 					 * If we have a different TCB next, and there is data
5463 					 * present. If we have already taken some (pdapi), OR we can
5464 					 * ref the tcb and no delivery as started on this stream, we
5465 					 * take it. Note we allow a notification on a different
5466 					 * assoc to be delivered..
5467 					 */
5468 					control = ctl;
5469 					goto found_one;
5470 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5471 					    (ctl->length) &&
5472 					    ((ctl->some_taken) ||
5473 					    ((ctl->do_not_ref_stcb == 0) &&
5474 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5475 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5476 					/*-
5477 					 * If we have the same tcb, and there is data present, and we
5478 					 * have the strm interleave feature present. Then if we have
5479 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5480 					 * not started a delivery for this stream, we can take it.
5481 					 * Note we do NOT allow a notificaiton on the same assoc to
5482 					 * be delivered.
5483 					 */
5484 					control = ctl;
5485 					goto found_one;
5486 				}
5487 				ctl = TAILQ_NEXT(ctl, next);
5488 			}
5489 		}
5490 		/*
5491 		 * if we reach here, not suitable replacement is available
5492 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5493 		 * into the our held count, and its time to sleep again.
5494 		 */
5495 		held_length = so->so_rcv.sb_cc;
5496 		control->held_length = so->so_rcv.sb_cc;
5497 		goto restart;
5498 	}
5499 	/* Clear the held length since there is something to read */
5500 	control->held_length = 0;
5501 	if (hold_rlock) {
5502 		SCTP_INP_READ_UNLOCK(inp);
5503 		hold_rlock = 0;
5504 	}
5505 found_one:
5506 	/*
5507 	 * If we reach here, control has a some data for us to read off.
5508 	 * Note that stcb COULD be NULL.
5509 	 */
5510 	control->some_taken++;
5511 	if (hold_sblock) {
5512 		SOCKBUF_UNLOCK(&so->so_rcv);
5513 		hold_sblock = 0;
5514 	}
5515 	stcb = control->stcb;
5516 	if (stcb) {
5517 		if ((control->do_not_ref_stcb == 0) &&
5518 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5519 			if (freecnt_applied == 0)
5520 				stcb = NULL;
5521 		} else if (control->do_not_ref_stcb == 0) {
5522 			/* you can't free it on me please */
5523 			/*
5524 			 * The lock on the socket buffer protects us so the
5525 			 * free code will stop. But since we used the
5526 			 * socketbuf lock and the sender uses the tcb_lock
5527 			 * to increment, we need to use the atomic add to
5528 			 * the refcnt
5529 			 */
5530 			if (freecnt_applied) {
5531 #ifdef INVARIANTS
5532 				panic("refcnt already incremented");
5533 #else
5534 				SCTP_PRINTF("refcnt already incremented?\n");
5535 #endif
5536 			} else {
5537 				atomic_add_int(&stcb->asoc.refcnt, 1);
5538 				freecnt_applied = 1;
5539 			}
5540 			/*
5541 			 * Setup to remember how much we have not yet told
5542 			 * the peer our rwnd has opened up. Note we grab the
5543 			 * value from the tcb from last time. Note too that
5544 			 * sack sending clears this when a sack is sent,
5545 			 * which is fine. Once we hit the rwnd_req, we then
5546 			 * will go to the sctp_user_rcvd() that will not
5547 			 * lock until it KNOWs it MUST send a WUP-SACK.
5548 			 */
5549 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5550 			stcb->freed_by_sorcv_sincelast = 0;
5551 		}
5552 	}
5553 	if (stcb &&
5554 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5555 	    control->do_not_ref_stcb == 0) {
5556 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5557 	}
5558 	/* First lets get off the sinfo and sockaddr info */
5559 	if ((sinfo) && filling_sinfo) {
5560 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5561 		nxt = TAILQ_NEXT(control, next);
5562 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5563 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5564 			struct sctp_extrcvinfo *s_extra;
5565 
5566 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5567 			if ((nxt) &&
5568 			    (nxt->length)) {
5569 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5570 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5571 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5572 				}
5573 				if (nxt->spec_flags & M_NOTIFICATION) {
5574 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5575 				}
5576 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5577 				s_extra->sreinfo_next_length = nxt->length;
5578 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5579 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5580 				if (nxt->tail_mbuf != NULL) {
5581 					if (nxt->end_added) {
5582 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5583 					}
5584 				}
5585 			} else {
5586 				/*
5587 				 * we explicitly 0 this, since the memcpy
5588 				 * got some other things beyond the older
5589 				 * sinfo_ that is on the control's structure
5590 				 * :-D
5591 				 */
5592 				nxt = NULL;
5593 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5594 				s_extra->sreinfo_next_aid = 0;
5595 				s_extra->sreinfo_next_length = 0;
5596 				s_extra->sreinfo_next_ppid = 0;
5597 				s_extra->sreinfo_next_stream = 0;
5598 			}
5599 		}
5600 		/*
5601 		 * update off the real current cum-ack, if we have an stcb.
5602 		 */
5603 		if ((control->do_not_ref_stcb == 0) && stcb)
5604 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5605 		/*
5606 		 * mask off the high bits, we keep the actual chunk bits in
5607 		 * there.
5608 		 */
5609 		sinfo->sinfo_flags &= 0x00ff;
5610 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5611 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5612 		}
5613 	}
5614 #ifdef SCTP_ASOCLOG_OF_TSNS
5615 	{
5616 		int index, newindex;
5617 		struct sctp_pcbtsn_rlog *entry;
5618 
5619 		do {
5620 			index = inp->readlog_index;
5621 			newindex = index + 1;
5622 			if (newindex >= SCTP_READ_LOG_SIZE) {
5623 				newindex = 0;
5624 			}
5625 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5626 		entry = &inp->readlog[index];
5627 		entry->vtag = control->sinfo_assoc_id;
5628 		entry->strm = control->sinfo_stream;
5629 		entry->seq = control->sinfo_ssn;
5630 		entry->sz = control->length;
5631 		entry->flgs = control->sinfo_flags;
5632 	}
5633 #endif
5634 	if (fromlen && from) {
5635 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len);
5636 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5637 #ifdef INET6
5638 		case AF_INET6:
5639 			((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5640 			break;
5641 #endif
5642 #ifdef INET
5643 		case AF_INET:
5644 			((struct sockaddr_in *)from)->sin_port = control->port_from;
5645 			break;
5646 #endif
5647 		default:
5648 			break;
5649 		}
5650 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5651 
5652 #if defined(INET) && defined(INET6)
5653 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5654 		    (from->sa_family == AF_INET) &&
5655 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5656 			struct sockaddr_in *sin;
5657 			struct sockaddr_in6 sin6;
5658 
5659 			sin = (struct sockaddr_in *)from;
5660 			bzero(&sin6, sizeof(sin6));
5661 			sin6.sin6_family = AF_INET6;
5662 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5663 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5664 			bcopy(&sin->sin_addr,
5665 			    &sin6.sin6_addr.s6_addr32[3],
5666 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5667 			sin6.sin6_port = sin->sin_port;
5668 			memcpy(from, &sin6, sizeof(struct sockaddr_in6));
5669 		}
5670 #endif
5671 #ifdef INET6
5672 		{
5673 			struct sockaddr_in6 lsa6, *from6;
5674 
5675 			from6 = (struct sockaddr_in6 *)from;
5676 			sctp_recover_scope_mac(from6, (&lsa6));
5677 		}
5678 #endif
5679 	}
5680 	/* now copy out what data we can */
5681 	if (mp == NULL) {
5682 		/* copy out each mbuf in the chain up to length */
5683 get_more_data:
5684 		m = control->data;
5685 		while (m) {
5686 			/* Move out all we can */
5687 			cp_len = (int)uio->uio_resid;
5688 			my_len = (int)SCTP_BUF_LEN(m);
5689 			if (cp_len > my_len) {
5690 				/* not enough in this buf */
5691 				cp_len = my_len;
5692 			}
5693 			if (hold_rlock) {
5694 				SCTP_INP_READ_UNLOCK(inp);
5695 				hold_rlock = 0;
5696 			}
5697 			if (cp_len > 0)
5698 				error = uiomove(mtod(m, char *), cp_len, uio);
5699 			/* re-read */
5700 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5701 				goto release;
5702 			}
5703 			if ((control->do_not_ref_stcb == 0) && stcb &&
5704 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5705 				no_rcv_needed = 1;
5706 			}
5707 			if (error) {
5708 				/* error we are out of here */
5709 				goto release;
5710 			}
5711 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5712 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5713 			    ((control->end_added == 0) ||
5714 			    (control->end_added &&
5715 			    (TAILQ_NEXT(control, next) == NULL)))
5716 			    ) {
5717 				SCTP_INP_READ_LOCK(inp);
5718 				hold_rlock = 1;
5719 			}
5720 			if (cp_len == SCTP_BUF_LEN(m)) {
5721 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5722 				    (control->end_added)) {
5723 					out_flags |= MSG_EOR;
5724 					if ((control->do_not_ref_stcb == 0) &&
5725 					    (control->stcb != NULL) &&
5726 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5727 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5728 				}
5729 				if (control->spec_flags & M_NOTIFICATION) {
5730 					out_flags |= MSG_NOTIFICATION;
5731 				}
5732 				/* we ate up the mbuf */
5733 				if (in_flags & MSG_PEEK) {
5734 					/* just looking */
5735 					m = SCTP_BUF_NEXT(m);
5736 					copied_so_far += cp_len;
5737 				} else {
5738 					/* dispose of the mbuf */
5739 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5740 						sctp_sblog(&so->so_rcv,
5741 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5742 					}
5743 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5744 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5745 						sctp_sblog(&so->so_rcv,
5746 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5747 					}
5748 					copied_so_far += cp_len;
5749 					freed_so_far += cp_len;
5750 					freed_so_far += MSIZE;
5751 					atomic_subtract_int(&control->length, cp_len);
5752 					control->data = sctp_m_free(m);
5753 					m = control->data;
5754 					/*
5755 					 * been through it all, must hold sb
5756 					 * lock ok to null tail
5757 					 */
5758 					if (control->data == NULL) {
5759 #ifdef INVARIANTS
5760 						if ((control->end_added == 0) ||
5761 						    (TAILQ_NEXT(control, next) == NULL)) {
5762 							/*
5763 							 * If the end is not
5764 							 * added, OR the
5765 							 * next is NOT null
5766 							 * we MUST have the
5767 							 * lock.
5768 							 */
5769 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5770 								panic("Hmm we don't own the lock?");
5771 							}
5772 						}
5773 #endif
5774 						control->tail_mbuf = NULL;
5775 #ifdef INVARIANTS
5776 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5777 							panic("end_added, nothing left and no MSG_EOR");
5778 						}
5779 #endif
5780 					}
5781 				}
5782 			} else {
5783 				/* Do we need to trim the mbuf? */
5784 				if (control->spec_flags & M_NOTIFICATION) {
5785 					out_flags |= MSG_NOTIFICATION;
5786 				}
5787 				if ((in_flags & MSG_PEEK) == 0) {
5788 					SCTP_BUF_RESV_UF(m, cp_len);
5789 					SCTP_BUF_LEN(m) -= cp_len;
5790 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5791 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5792 					}
5793 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5794 					if ((control->do_not_ref_stcb == 0) &&
5795 					    stcb) {
5796 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5797 					}
5798 					copied_so_far += cp_len;
5799 					freed_so_far += cp_len;
5800 					freed_so_far += MSIZE;
5801 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5802 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5803 						    SCTP_LOG_SBRESULT, 0);
5804 					}
5805 					atomic_subtract_int(&control->length, cp_len);
5806 				} else {
5807 					copied_so_far += cp_len;
5808 				}
5809 			}
5810 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5811 				break;
5812 			}
5813 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5814 			    (control->do_not_ref_stcb == 0) &&
5815 			    (freed_so_far >= rwnd_req)) {
5816 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5817 			}
5818 		}		/* end while(m) */
5819 		/*
5820 		 * At this point we have looked at it all and we either have
5821 		 * a MSG_EOR/or read all the user wants... <OR>
5822 		 * control->length == 0.
5823 		 */
5824 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5825 			/* we are done with this control */
5826 			if (control->length == 0) {
5827 				if (control->data) {
5828 #ifdef INVARIANTS
5829 					panic("control->data not null at read eor?");
5830 #else
5831 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5832 					sctp_m_freem(control->data);
5833 					control->data = NULL;
5834 #endif
5835 				}
5836 		done_with_control:
5837 				if (TAILQ_NEXT(control, next) == NULL) {
5838 					/*
5839 					 * If we don't have a next we need a
5840 					 * lock, if there is a next
5841 					 * interrupt is filling ahead of us
5842 					 * and we don't need a lock to
5843 					 * remove this guy (which is the
5844 					 * head of the queue).
5845 					 */
5846 					if (hold_rlock == 0) {
5847 						SCTP_INP_READ_LOCK(inp);
5848 						hold_rlock = 1;
5849 					}
5850 				}
5851 				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hidden data */
5853 				if (control->held_length) {
5854 					held_length = 0;
5855 					control->held_length = 0;
5856 					wakeup_read_socket = 1;
5857 				}
5858 				if (control->aux_data) {
5859 					sctp_m_free(control->aux_data);
5860 					control->aux_data = NULL;
5861 				}
5862 				no_rcv_needed = control->do_not_ref_stcb;
5863 				sctp_free_remote_addr(control->whoFrom);
5864 				control->data = NULL;
5865 				sctp_free_a_readq(stcb, control);
5866 				control = NULL;
5867 				if ((freed_so_far >= rwnd_req) &&
5868 				    (no_rcv_needed == 0))
5869 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5870 
5871 			} else {
5872 				/*
5873 				 * The user did not read all of this
5874 				 * message, turn off the returned MSG_EOR
5875 				 * since we are leaving more behind on the
5876 				 * control to read.
5877 				 */
5878 #ifdef INVARIANTS
5879 				if (control->end_added &&
5880 				    (control->data == NULL) &&
5881 				    (control->tail_mbuf == NULL)) {
5882 					panic("Gak, control->length is corrupt?");
5883 				}
5884 #endif
5885 				no_rcv_needed = control->do_not_ref_stcb;
5886 				out_flags &= ~MSG_EOR;
5887 			}
5888 		}
5889 		if (out_flags & MSG_EOR) {
5890 			goto release;
5891 		}
5892 		if ((uio->uio_resid == 0) ||
5893 		    ((in_eeor_mode) &&
5894 		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5895 			goto release;
5896 		}
5897 		/*
5898 		 * If I hit here the receiver wants more and this message is
5899 		 * NOT done (pd-api). So two questions. Can we block? if not
5900 		 * we are done. Did the user NOT set MSG_WAITALL?
5901 		 */
5902 		if (block_allowed == 0) {
5903 			goto release;
5904 		}
5905 		/*
5906 		 * We need to wait for more data a few things: - We don't
5907 		 * sbunlock() so we don't get someone else reading. - We
5908 		 * must be sure to account for the case where what is added
5909 		 * is NOT to our control when we wakeup.
5910 		 */
5911 
5912 		/*
5913 		 * Do we need to tell the transport a rwnd update might be
5914 		 * needed before we go to sleep?
5915 		 */
5916 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5917 		    ((freed_so_far >= rwnd_req) &&
5918 		    (control->do_not_ref_stcb == 0) &&
5919 		    (no_rcv_needed == 0))) {
5920 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5921 		}
5922 wait_some_more:
5923 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5924 			goto release;
5925 		}
5926 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5927 			goto release;
5928 
5929 		if (hold_rlock == 1) {
5930 			SCTP_INP_READ_UNLOCK(inp);
5931 			hold_rlock = 0;
5932 		}
5933 		if (hold_sblock == 0) {
5934 			SOCKBUF_LOCK(&so->so_rcv);
5935 			hold_sblock = 1;
5936 		}
5937 		if ((copied_so_far) && (control->length == 0) &&
5938 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5939 			goto release;
5940 		}
5941 		if (so->so_rcv.sb_cc <= control->held_length) {
5942 			error = sbwait(&so->so_rcv);
5943 			if (error) {
5944 				goto release;
5945 			}
5946 			control->held_length = 0;
5947 		}
5948 		if (hold_sblock) {
5949 			SOCKBUF_UNLOCK(&so->so_rcv);
5950 			hold_sblock = 0;
5951 		}
5952 		if (control->length == 0) {
5953 			/* still nothing here */
5954 			if (control->end_added == 1) {
5955 				/* he aborted, or is done i.e.did a shutdown */
5956 				out_flags |= MSG_EOR;
5957 				if (control->pdapi_aborted) {
5958 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5959 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5960 
5961 					out_flags |= MSG_TRUNC;
5962 				} else {
5963 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5964 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5965 				}
5966 				goto done_with_control;
5967 			}
5968 			if (so->so_rcv.sb_cc > held_length) {
5969 				control->held_length = so->so_rcv.sb_cc;
5970 				held_length = 0;
5971 			}
5972 			goto wait_some_more;
5973 		} else if (control->data == NULL) {
5974 			/*
5975 			 * we must re-sync since data is probably being
5976 			 * added
5977 			 */
5978 			SCTP_INP_READ_LOCK(inp);
5979 			if ((control->length > 0) && (control->data == NULL)) {
5980 				/*
5981 				 * big trouble.. we have the lock and its
5982 				 * corrupt?
5983 				 */
5984 #ifdef INVARIANTS
5985 				panic("Impossible data==NULL length !=0");
5986 #endif
5987 				out_flags |= MSG_EOR;
5988 				out_flags |= MSG_TRUNC;
5989 				control->length = 0;
5990 				SCTP_INP_READ_UNLOCK(inp);
5991 				goto done_with_control;
5992 			}
5993 			SCTP_INP_READ_UNLOCK(inp);
5994 			/* We will fall around to get more data */
5995 		}
5996 		goto get_more_data;
5997 	} else {
5998 		/*-
5999 		 * Give caller back the mbuf chain,
6000 		 * store in uio_resid the length
6001 		 */
6002 		wakeup_read_socket = 0;
6003 		if ((control->end_added == 0) ||
6004 		    (TAILQ_NEXT(control, next) == NULL)) {
6005 			/* Need to get rlock */
6006 			if (hold_rlock == 0) {
6007 				SCTP_INP_READ_LOCK(inp);
6008 				hold_rlock = 1;
6009 			}
6010 		}
6011 		if (control->end_added) {
6012 			out_flags |= MSG_EOR;
6013 			if ((control->do_not_ref_stcb == 0) &&
6014 			    (control->stcb != NULL) &&
6015 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6016 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6017 		}
6018 		if (control->spec_flags & M_NOTIFICATION) {
6019 			out_flags |= MSG_NOTIFICATION;
6020 		}
6021 		uio->uio_resid = control->length;
6022 		*mp = control->data;
6023 		m = control->data;
6024 		while (m) {
6025 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6026 				sctp_sblog(&so->so_rcv,
6027 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6028 			}
6029 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6030 			freed_so_far += SCTP_BUF_LEN(m);
6031 			freed_so_far += MSIZE;
6032 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6033 				sctp_sblog(&so->so_rcv,
6034 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6035 			}
6036 			m = SCTP_BUF_NEXT(m);
6037 		}
6038 		control->data = control->tail_mbuf = NULL;
6039 		control->length = 0;
6040 		if (out_flags & MSG_EOR) {
6041 			/* Done with this control */
6042 			goto done_with_control;
6043 		}
6044 	}
6045 release:
6046 	if (hold_rlock == 1) {
6047 		SCTP_INP_READ_UNLOCK(inp);
6048 		hold_rlock = 0;
6049 	}
6050 	if (hold_sblock == 1) {
6051 		SOCKBUF_UNLOCK(&so->so_rcv);
6052 		hold_sblock = 0;
6053 	}
6054 	sbunlock(&so->so_rcv);
6055 	sockbuf_lock = 0;
6056 
6057 release_unlocked:
6058 	if (hold_sblock) {
6059 		SOCKBUF_UNLOCK(&so->so_rcv);
6060 		hold_sblock = 0;
6061 	}
6062 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6063 		if ((freed_so_far >= rwnd_req) &&
6064 		    (control && (control->do_not_ref_stcb == 0)) &&
6065 		    (no_rcv_needed == 0))
6066 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6067 	}
6068 out:
6069 	if (msg_flags) {
6070 		*msg_flags = out_flags;
6071 	}
6072 	if (((out_flags & MSG_EOR) == 0) &&
6073 	    ((in_flags & MSG_PEEK) == 0) &&
6074 	    (sinfo) &&
6075 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6076 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6077 		struct sctp_extrcvinfo *s_extra;
6078 
6079 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6080 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6081 	}
6082 	if (hold_rlock == 1) {
6083 		SCTP_INP_READ_UNLOCK(inp);
6084 	}
6085 	if (hold_sblock) {
6086 		SOCKBUF_UNLOCK(&so->so_rcv);
6087 	}
6088 	if (sockbuf_lock) {
6089 		sbunlock(&so->so_rcv);
6090 	}
6091 	if (freecnt_applied) {
6092 		/*
6093 		 * The lock on the socket buffer protects us so the free
6094 		 * code will stop. But since we used the socketbuf lock and
6095 		 * the sender uses the tcb_lock to increment, we need to use
6096 		 * the atomic add to the refcnt.
6097 		 */
6098 		if (stcb == NULL) {
6099 #ifdef INVARIANTS
6100 			panic("stcb for refcnt has gone NULL?");
6101 			goto stage_left;
6102 #else
6103 			goto stage_left;
6104 #endif
6105 		}
6106 		atomic_add_int(&stcb->asoc.refcnt, -1);
6107 		/* Save the value back for next time */
6108 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6109 	}
6110 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6111 		if (stcb) {
6112 			sctp_misc_ints(SCTP_SORECV_DONE,
6113 			    freed_so_far,
6114 			    ((uio) ? (slen - uio->uio_resid) : slen),
6115 			    stcb->asoc.my_rwnd,
6116 			    so->so_rcv.sb_cc);
6117 		} else {
6118 			sctp_misc_ints(SCTP_SORECV_DONE,
6119 			    freed_so_far,
6120 			    ((uio) ? (slen - uio->uio_resid) : slen),
6121 			    0,
6122 			    so->so_rcv.sb_cc);
6123 		}
6124 	}
6125 stage_left:
6126 	if (wakeup_read_socket) {
6127 		sctp_sorwakeup(inp, so);
6128 	}
6129 	return (error);
6130 }
6131 
6132 
6133 #ifdef SCTP_MBUF_LOGGING
6134 struct mbuf *
6135 sctp_m_free(struct mbuf *m)
6136 {
6137 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6138 		if (SCTP_BUF_IS_EXTENDED(m)) {
6139 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6140 		}
6141 	}
6142 	return (m_free(m));
6143 }
6144 
6145 void
6146 sctp_m_freem(struct mbuf *mb)
6147 {
6148 	while (mb != NULL)
6149 		mb = sctp_m_free(mb);
6150 }
6151 
6152 #endif
6153 
/*
 * Queue a peer-set-primary request for the given local address: every
 * association holding the address will ask its peer to make it primary.
 * Returns 0 on success or an errno value.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	/* The address must exist on this VRF. */
	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* Hold a reference on the ifa while the work item is queued. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_WQ_ADDR_UNLOCK();
	/* Kick the address work-queue timer so the request is processed. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}
6200 
6201 
6202 int
6203 sctp_soreceive(struct socket *so,
6204     struct sockaddr **psa,
6205     struct uio *uio,
6206     struct mbuf **mp0,
6207     struct mbuf **controlp,
6208     int *flagsp)
6209 {
6210 	int error, fromlen;
6211 	uint8_t sockbuf[256];
6212 	struct sockaddr *from;
6213 	struct sctp_extrcvinfo sinfo;
6214 	int filling_sinfo = 1;
6215 	struct sctp_inpcb *inp;
6216 
6217 	inp = (struct sctp_inpcb *)so->so_pcb;
6218 	/* pickup the assoc we are reading from */
6219 	if (inp == NULL) {
6220 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6221 		return (EINVAL);
6222 	}
6223 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6224 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6225 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6226 	    (controlp == NULL)) {
6227 		/* user does not want the sndrcv ctl */
6228 		filling_sinfo = 0;
6229 	}
6230 	if (psa) {
6231 		from = (struct sockaddr *)sockbuf;
6232 		fromlen = sizeof(sockbuf);
6233 		from->sa_len = 0;
6234 	} else {
6235 		from = NULL;
6236 		fromlen = 0;
6237 	}
6238 
6239 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6240 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6241 	if ((controlp) && (filling_sinfo)) {
6242 		/* copy back the sinfo in a CMSG format */
6243 		if (filling_sinfo)
6244 			*controlp = sctp_build_ctl_nchunk(inp,
6245 			    (struct sctp_sndrcvinfo *)&sinfo);
6246 		else
6247 			*controlp = NULL;
6248 	}
6249 	if (psa) {
6250 		/* copy back the address info */
6251 		if (from && from->sa_len) {
6252 			*psa = sodupsockaddr(from, M_NOWAIT);
6253 		} else {
6254 			*psa = NULL;
6255 		}
6256 	}
6257 	return (error);
6258 }
6259 
6260 
6261 
6262 
6263 
/*
 * Add each address from a packed sctp_connectx() address list to the
 * association.  Returns the number of addresses added; on failure the
 * association has been freed, *error is set, and the caller must not
 * touch stcb again.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject wildcard, broadcast, and multicast peers. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast peers. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): unknown families are silently skipped;
			 * incr keeps its previous value (0 on the first entry),
			 * so sa may not advance — verify callers pre-validate
			 * the list.
			 */
			break;
		}
		/* Step to the next packed sockaddr in the list. */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6339 
/*
 * Walk a packed sctp_connectx() address list: validate each sockaddr,
 * count the v4/v6 entries, and probe for an already-existing association
 * on any of the addresses.  Returns that TCB (with the inp ref still
 * held) if found, otherwise NULL.  On a malformed address *error and
 * *bad_addr are set and NULL is returned.  *totaddr may be reduced to
 * the number of entries actually examined.
 */
struct sctp_tcb *
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    int *totaddr, int *num_v4, int *num_v6, int *error,
    int limit, int *bad_addr)
{
	struct sockaddr *sa;
	struct sctp_tcb *stcb = NULL;
	size_t incr, at, i;

	at = incr = 0;
	sa = addr;

	*error = *num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	for (i = 0; i < (size_t)*totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			(*num_v4) += 1;
			incr = sizeof(struct sockaddr_in);
			/* sa_len must match the family's sockaddr size. */
			if (sa->sa_len != incr) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
			break;
#endif
#ifdef INET6
		case AF_INET6:
			{
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)sa;
				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
					/* Must be non-mapped for connectx */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				(*num_v6) += 1;
				incr = sizeof(struct sockaddr_in6);
				if (sa->sa_len != incr) {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				break;
			}
#endif
		default:
			/* Unknown family: truncate the list here. */
			*totaddr = i;
			/* we are done */
			break;
		}
		if (i == (size_t)*totaddr) {
			break;
		}
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/* Already have or am bring up an association */
			return (stcb);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		/*
		 * NOTE(review): 'at' is never advanced in this loop, so this
		 * bound check only ever compares a single increment against
		 * limit — looks like the accumulation is missing; verify
		 * against callers before relying on 'limit'.
		 */
		if ((at + incr) > (size_t)limit) {
			*totaddr = i;
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return ((struct sctp_tcb *)NULL);
}
6416 
6417 /*
6418  * sctp_bindx(ADD) for one address.
6419  * assumes all arguments are valid/checked by caller.
6420  */
/*
 * sctp_bindx(ADD) for one address.
 * assumes all arguments are valid/checked by caller.
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Convert the v4-mapped v6 address to a plain v4 one. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* Endpoint not yet bound at all: this becomes the first bind. */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/*
		 * validate the incoming port (the sin_port/sin6_port fields
		 * share the same offset, so this cast is used for either
		 * family — presumably intentional; confirm on layout change)
		 */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* Is some other endpoint already bound to this addr:port? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Free: clear the port and add the address to this ep. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6544 
6545 /*
6546  * sctp_bindx(DELETE) for one address.
6547  * assumes all arguments are valid/checked by caller.
6548  */
/*
 * sctp_bindx(DELETE) for one address.
 * assumes all arguments are valid/checked by caller.
 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Convert the v4-mapped v6 address to a plain v4 one. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6629 
6630 /*
6631  * returns the valid local address count for an assoc, taking into account
6632  * all scoping rules
6633  */
/*
 * returns the valid local address count for an assoc, taking into account
 * all scoping rules
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope;

#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;

#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;

#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			/* Skip loopback interfaces when out of scope. */
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* Private addresses count only in local scope. */
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad
									 * link-local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6771 
6772 #if defined(SCTP_LOCAL_TRACE_BUF)
6773 
6774 void
6775 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6776 {
6777 	uint32_t saveindex, newindex;
6778 
6779 	do {
6780 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6781 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6782 			newindex = 1;
6783 		} else {
6784 			newindex = saveindex + 1;
6785 		}
6786 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6787 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6788 		saveindex = 0;
6789 	}
6790 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6791 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6792 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6793 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6794 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6795 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6796 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6797 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6798 }
6799 
6800 #endif
/*
 * Receive callback for SCTP-over-UDP tunneled packets.  Strips the UDP
 * header out of the mbuf chain, fixes up the IP length field, and hands
 * the packet (plus the UDP source port) to the normal SCTP input path.
 * Consumes the mbuf on all paths.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
{
	struct ip *iph;

#ifdef INET6
	struct ip6_hdr *ip6;

#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;	/* kept in network byte order */
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* Shrink the IP length to account for the removed UDP header. */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		/* Unknown IP version: drop. */
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6872 
/*
 * Tear down the SCTP-over-UDP tunneling sockets (one per address family).
 */
void
sctp_over_udp_stop(void)
{
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
6893 
6894 int
6895 sctp_over_udp_start(void)
6896 {
6897 	uint16_t port;
6898 	int ret;
6899 
6900 #ifdef INET
6901 	struct sockaddr_in sin;
6902 
6903 #endif
6904 #ifdef INET6
6905 	struct sockaddr_in6 sin6;
6906 
6907 #endif
6908 	/*
6909 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6910 	 * for writting!
6911 	 */
6912 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
6913 	if (ntohs(port) == 0) {
6914 		/* Must have a port set */
6915 		return (EINVAL);
6916 	}
6917 #ifdef INET
6918 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
6919 		/* Already running -- must stop first */
6920 		return (EALREADY);
6921 	}
6922 #endif
6923 #ifdef INET6
6924 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
6925 		/* Already running -- must stop first */
6926 		return (EALREADY);
6927 	}
6928 #endif
6929 #ifdef INET
6930 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
6931 	    SOCK_DGRAM, IPPROTO_UDP,
6932 	    curthread->td_ucred, curthread))) {
6933 		sctp_over_udp_stop();
6934 		return (ret);
6935 	}
6936 	/* Call the special UDP hook. */
6937 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
6938 	    sctp_recv_udp_tunneled_packet))) {
6939 		sctp_over_udp_stop();
6940 		return (ret);
6941 	}
6942 	/* Ok, we have a socket, bind it to the port. */
6943 	memset(&sin, 0, sizeof(struct sockaddr_in));
6944 	sin.sin_len = sizeof(struct sockaddr_in);
6945 	sin.sin_family = AF_INET;
6946 	sin.sin_port = htons(port);
6947 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
6948 	    (struct sockaddr *)&sin, curthread))) {
6949 		sctp_over_udp_stop();
6950 		return (ret);
6951 	}
6952 #endif
6953 #ifdef INET6
6954 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
6955 	    SOCK_DGRAM, IPPROTO_UDP,
6956 	    curthread->td_ucred, curthread))) {
6957 		sctp_over_udp_stop();
6958 		return (ret);
6959 	}
6960 	/* Call the special UDP hook. */
6961 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
6962 	    sctp_recv_udp_tunneled_packet))) {
6963 		sctp_over_udp_stop();
6964 		return (ret);
6965 	}
6966 	/* Ok, we have a socket, bind it to the port. */
6967 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
6968 	sin6.sin6_len = sizeof(struct sockaddr_in6);
6969 	sin6.sin6_family = AF_INET6;
6970 	sin6.sin6_port = htons(port);
6971 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
6972 	    (struct sockaddr *)&sin6, curthread))) {
6973 		sctp_over_udp_stop();
6974 		return (ret);
6975 	}
6976 #endif
6977 	return (0);
6978 }
6979