xref: /freebsd/sys/netinet/sctputil.c (revision 298f5fdc242b760e70cd3494e3a4f1f50b20664d)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55 
56 
57 #ifndef KTR_SCTP
58 #define KTR_SCTP KTR_SUBSYS
59 #endif
60 
61 extern struct sctp_cc_functions sctp_cc_functions[];
62 extern struct sctp_ss_functions sctp_ss_functions[];
63 
64 void
65 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
66 {
67 	struct sctp_cwnd_log sctp_clog;
68 
69 	sctp_clog.x.sb.stcb = stcb;
70 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
71 	if (stcb)
72 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
73 	else
74 		sctp_clog.x.sb.stcb_sbcc = 0;
75 	sctp_clog.x.sb.incr = incr;
76 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
77 	    SCTP_LOG_EVENT_SB,
78 	    from,
79 	    sctp_clog.x.misc.log1,
80 	    sctp_clog.x.misc.log2,
81 	    sctp_clog.x.misc.log3,
82 	    sctp_clog.x.misc.log4);
83 }
84 
85 void
86 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
87 {
88 	struct sctp_cwnd_log sctp_clog;
89 
90 	sctp_clog.x.close.inp = (void *)inp;
91 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
92 	if (stcb) {
93 		sctp_clog.x.close.stcb = (void *)stcb;
94 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
95 	} else {
96 		sctp_clog.x.close.stcb = 0;
97 		sctp_clog.x.close.state = 0;
98 	}
99 	sctp_clog.x.close.loc = loc;
100 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
101 	    SCTP_LOG_EVENT_CLOSE,
102 	    0,
103 	    sctp_clog.x.misc.log1,
104 	    sctp_clog.x.misc.log2,
105 	    sctp_clog.x.misc.log3,
106 	    sctp_clog.x.misc.log4);
107 }
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 }
125 
126 void
127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128 {
129 	struct sctp_cwnd_log sctp_clog;
130 
131 	sctp_clog.x.strlog.stcb = stcb;
132 	sctp_clog.x.strlog.n_tsn = tsn;
133 	sctp_clog.x.strlog.n_sseq = sseq;
134 	sctp_clog.x.strlog.e_tsn = 0;
135 	sctp_clog.x.strlog.e_sseq = 0;
136 	sctp_clog.x.strlog.strm = stream;
137 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138 	    SCTP_LOG_EVENT_STRM,
139 	    from,
140 	    sctp_clog.x.misc.log1,
141 	    sctp_clog.x.misc.log2,
142 	    sctp_clog.x.misc.log3,
143 	    sctp_clog.x.misc.log4);
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 void
166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167 {
168 	struct sctp_cwnd_log sctp_clog;
169 
170 	sctp_clog.x.sack.cumack = cumack;
171 	sctp_clog.x.sack.oldcumack = old_cumack;
172 	sctp_clog.x.sack.tsn = tsn;
173 	sctp_clog.x.sack.numGaps = gaps;
174 	sctp_clog.x.sack.numDups = dups;
175 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176 	    SCTP_LOG_EVENT_SACK,
177 	    from,
178 	    sctp_clog.x.misc.log1,
179 	    sctp_clog.x.misc.log2,
180 	    sctp_clog.x.misc.log3,
181 	    sctp_clog.x.misc.log4);
182 }
183 
184 void
185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186 {
187 	struct sctp_cwnd_log sctp_clog;
188 
189 	memset(&sctp_clog, 0, sizeof(sctp_clog));
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
204 {
205 	struct sctp_cwnd_log sctp_clog;
206 
207 	memset(&sctp_clog, 0, sizeof(sctp_clog));
208 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210 	sctp_clog.x.fr.tsn = tsn;
211 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212 	    SCTP_LOG_EVENT_FR,
213 	    from,
214 	    sctp_clog.x.misc.log1,
215 	    sctp_clog.x.misc.log2,
216 	    sctp_clog.x.misc.log3,
217 	    sctp_clog.x.misc.log4);
218 }
219 
220 void
221 sctp_log_mb(struct mbuf *m, int from)
222 {
223 	struct sctp_cwnd_log sctp_clog;
224 
225 	sctp_clog.x.mb.mp = m;
226 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
227 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
228 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
229 	if (SCTP_BUF_IS_EXTENDED(m)) {
230 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
231 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
232 	} else {
233 		sctp_clog.x.mb.ext = 0;
234 		sctp_clog.x.mb.refcnt = 0;
235 	}
236 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
237 	    SCTP_LOG_EVENT_MBUF,
238 	    from,
239 	    sctp_clog.x.misc.log1,
240 	    sctp_clog.x.misc.log2,
241 	    sctp_clog.x.misc.log3,
242 	    sctp_clog.x.misc.log4);
243 }
244 
245 void
246 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
247 {
248 	struct sctp_cwnd_log sctp_clog;
249 
250 	if (control == NULL) {
251 		SCTP_PRINTF("Gak log of NULL?\n");
252 		return;
253 	}
254 	sctp_clog.x.strlog.stcb = control->stcb;
255 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
256 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
257 	sctp_clog.x.strlog.strm = control->sinfo_stream;
258 	if (poschk != NULL) {
259 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
260 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
261 	} else {
262 		sctp_clog.x.strlog.e_tsn = 0;
263 		sctp_clog.x.strlog.e_sseq = 0;
264 	}
265 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
266 	    SCTP_LOG_EVENT_STRM,
267 	    from,
268 	    sctp_clog.x.misc.log1,
269 	    sctp_clog.x.misc.log2,
270 	    sctp_clog.x.misc.log3,
271 	    sctp_clog.x.misc.log4);
272 }
273 
274 void
275 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
276 {
277 	struct sctp_cwnd_log sctp_clog;
278 
279 	sctp_clog.x.cwnd.net = net;
280 	if (stcb->asoc.send_queue_cnt > 255)
281 		sctp_clog.x.cwnd.cnt_in_send = 255;
282 	else
283 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
284 	if (stcb->asoc.stream_queue_cnt > 255)
285 		sctp_clog.x.cwnd.cnt_in_str = 255;
286 	else
287 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
288 
289 	if (net) {
290 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
291 		sctp_clog.x.cwnd.inflight = net->flight_size;
292 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
293 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
294 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
295 	}
296 	if (SCTP_CWNDLOG_PRESEND == from) {
297 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
298 	}
299 	sctp_clog.x.cwnd.cwnd_augment = augment;
300 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
301 	    SCTP_LOG_EVENT_CWND,
302 	    from,
303 	    sctp_clog.x.misc.log1,
304 	    sctp_clog.x.misc.log2,
305 	    sctp_clog.x.misc.log3,
306 	    sctp_clog.x.misc.log4);
307 }
308 
309 void
310 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
311 {
312 	struct sctp_cwnd_log sctp_clog;
313 
314 	memset(&sctp_clog, 0, sizeof(sctp_clog));
315 	if (inp) {
316 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
317 
318 	} else {
319 		sctp_clog.x.lock.sock = (void *)NULL;
320 	}
321 	sctp_clog.x.lock.inp = (void *)inp;
322 	if (stcb) {
323 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
324 	} else {
325 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
326 	}
327 	if (inp) {
328 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
329 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
330 	} else {
331 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
332 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
333 	}
334 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
335 	if (inp && (inp->sctp_socket)) {
336 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
337 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
338 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
339 	} else {
340 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
341 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
342 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
343 	}
344 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
345 	    SCTP_LOG_LOCK_EVENT,
346 	    from,
347 	    sctp_clog.x.misc.log1,
348 	    sctp_clog.x.misc.log2,
349 	    sctp_clog.x.misc.log3,
350 	    sctp_clog.x.misc.log4);
351 }
352 
353 void
354 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
355 {
356 	struct sctp_cwnd_log sctp_clog;
357 
358 	memset(&sctp_clog, 0, sizeof(sctp_clog));
359 	sctp_clog.x.cwnd.net = net;
360 	sctp_clog.x.cwnd.cwnd_new_value = error;
361 	sctp_clog.x.cwnd.inflight = net->flight_size;
362 	sctp_clog.x.cwnd.cwnd_augment = burst;
363 	if (stcb->asoc.send_queue_cnt > 255)
364 		sctp_clog.x.cwnd.cnt_in_send = 255;
365 	else
366 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
367 	if (stcb->asoc.stream_queue_cnt > 255)
368 		sctp_clog.x.cwnd.cnt_in_str = 255;
369 	else
370 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
371 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
372 	    SCTP_LOG_EVENT_MAXBURST,
373 	    from,
374 	    sctp_clog.x.misc.log1,
375 	    sctp_clog.x.misc.log2,
376 	    sctp_clog.x.misc.log3,
377 	    sctp_clog.x.misc.log4);
378 }
379 
380 void
381 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
382 {
383 	struct sctp_cwnd_log sctp_clog;
384 
385 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
386 	sctp_clog.x.rwnd.send_size = snd_size;
387 	sctp_clog.x.rwnd.overhead = overhead;
388 	sctp_clog.x.rwnd.new_rwnd = 0;
389 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
390 	    SCTP_LOG_EVENT_RWND,
391 	    from,
392 	    sctp_clog.x.misc.log1,
393 	    sctp_clog.x.misc.log2,
394 	    sctp_clog.x.misc.log3,
395 	    sctp_clog.x.misc.log4);
396 }
397 
398 void
399 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
400 {
401 	struct sctp_cwnd_log sctp_clog;
402 
403 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
404 	sctp_clog.x.rwnd.send_size = flight_size;
405 	sctp_clog.x.rwnd.overhead = overhead;
406 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
407 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
408 	    SCTP_LOG_EVENT_RWND,
409 	    from,
410 	    sctp_clog.x.misc.log1,
411 	    sctp_clog.x.misc.log2,
412 	    sctp_clog.x.misc.log3,
413 	    sctp_clog.x.misc.log4);
414 }
415 
416 void
417 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
418 {
419 	struct sctp_cwnd_log sctp_clog;
420 
421 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
422 	sctp_clog.x.mbcnt.size_change = book;
423 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
424 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
425 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
426 	    SCTP_LOG_EVENT_MBCNT,
427 	    from,
428 	    sctp_clog.x.misc.log1,
429 	    sctp_clog.x.misc.log2,
430 	    sctp_clog.x.misc.log3,
431 	    sctp_clog.x.misc.log4);
432 }
433 
/*
 * Emit four caller-supplied 32-bit values to the KTR trace as a
 * miscellaneous SCTP event; 'from' identifies the call site.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
442 
443 void
444 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
445 {
446 	struct sctp_cwnd_log sctp_clog;
447 
448 	sctp_clog.x.wake.stcb = (void *)stcb;
449 	sctp_clog.x.wake.wake_cnt = wake_cnt;
450 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
451 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
452 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
453 
454 	if (stcb->asoc.stream_queue_cnt < 0xff)
455 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
456 	else
457 		sctp_clog.x.wake.stream_qcnt = 0xff;
458 
459 	if (stcb->asoc.chunks_on_out_queue < 0xff)
460 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
461 	else
462 		sctp_clog.x.wake.chunks_on_oque = 0xff;
463 
464 	sctp_clog.x.wake.sctpflags = 0;
465 	/* set in the defered mode stuff */
466 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
467 		sctp_clog.x.wake.sctpflags |= 1;
468 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
469 		sctp_clog.x.wake.sctpflags |= 2;
470 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
471 		sctp_clog.x.wake.sctpflags |= 4;
472 	/* what about the sb */
473 	if (stcb->sctp_socket) {
474 		struct socket *so = stcb->sctp_socket;
475 
476 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
477 	} else {
478 		sctp_clog.x.wake.sbflags = 0xff;
479 	}
480 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
481 	    SCTP_LOG_EVENT_WAKE,
482 	    from,
483 	    sctp_clog.x.misc.log1,
484 	    sctp_clog.x.misc.log2,
485 	    sctp_clog.x.misc.log3,
486 	    sctp_clog.x.misc.log4);
487 }
488 
489 void
490 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
491 {
492 	struct sctp_cwnd_log sctp_clog;
493 
494 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
495 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
496 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
497 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
498 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
499 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
500 	sctp_clog.x.blk.sndlen = sendlen;
501 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
502 	    SCTP_LOG_EVENT_BLOCK,
503 	    from,
504 	    sctp_clog.x.misc.log1,
505 	    sctp_clog.x.misc.log2,
506 	    sctp_clog.x.misc.log3,
507 	    sctp_clog.x.misc.log4);
508 }
509 
/*
 * Stub: statistics are emitted through KTR tracing (the SCTP_CTR6
 * calls above), so there is nothing to copy out here; always succeeds.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
516 
517 #ifdef SCTP_AUDITING_ENABLED
/* Audit trail: ring buffer of (event, detail) byte pairs. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next slot to write in sctp_audit_data; wraps at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
520 
521 static
522 void
523 sctp_print_audit_report(void)
524 {
525 	int i;
526 	int cnt;
527 
528 	cnt = 0;
529 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
530 		if ((sctp_audit_data[i][0] == 0xe0) &&
531 		    (sctp_audit_data[i][1] == 0x01)) {
532 			cnt = 0;
533 			SCTP_PRINTF("\n");
534 		} else if (sctp_audit_data[i][0] == 0xf0) {
535 			cnt = 0;
536 			SCTP_PRINTF("\n");
537 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
538 		    (sctp_audit_data[i][1] == 0x01)) {
539 			SCTP_PRINTF("\n");
540 			cnt = 0;
541 		}
542 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
543 		    (uint32_t) sctp_audit_data[i][1]);
544 		cnt++;
545 		if ((cnt % 14) == 0)
546 			SCTP_PRINTF("\n");
547 	}
548 	for (i = 0; i < sctp_audit_indx; i++) {
549 		if ((sctp_audit_data[i][0] == 0xe0) &&
550 		    (sctp_audit_data[i][1] == 0x01)) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if (sctp_audit_data[i][0] == 0xf0) {
554 			cnt = 0;
555 			SCTP_PRINTF("\n");
556 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
557 		    (sctp_audit_data[i][1] == 0x01)) {
558 			SCTP_PRINTF("\n");
559 			cnt = 0;
560 		}
561 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
562 		    (uint32_t) sctp_audit_data[i][1]);
563 		cnt++;
564 		if ((cnt % 14) == 0)
565 			SCTP_PRINTF("\n");
566 	}
567 	SCTP_PRINTF("\n");
568 }
569 
/*
 * Consistency audit of an association's retransmit and flight-size
 * accounting. Records a breadcrumb trail in sctp_audit_data, compares
 * the cached counters (sent_queue_retran_cnt, total_flight,
 * total_flight_count and each net's flight_size) against values
 * recomputed from the sent queue, CORRECTS any mismatch in place, and
 * prints the audit report if a discrepancy was found.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA marks entry into the auditor; second byte = caller id. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	/* 0xAF/0x01: bailed out because there is no endpoint. */
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xAF/0x02: bailed out because there is no association. */
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the cached retransmit count (low byte). */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Recompute from the sent queue: marked-for-resend chunks vs.
	 * chunks still counted as in flight (sent < RESEND).
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	/* 0xAF/0xA1: retransmit-count mismatch; fix the cached value. */
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	/* 0xAF/0xA2: total_flight mismatch; fix the cached value. */
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	/* 0xAF/0xA5: flight chunk-count mismatch; fix the cached value. */
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: sum of per-destination flight sizes. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	/* 0xAF/0xA3: per-net sum disagrees with the (now fixed) total. */
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
699 
700 void
701 sctp_audit_log(uint8_t ev, uint8_t fd)
702 {
703 
704 	sctp_audit_data[sctp_audit_indx][0] = ev;
705 	sctp_audit_data[sctp_audit_indx][1] = fd;
706 	sctp_audit_indx++;
707 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
708 		sctp_audit_indx = 0;
709 	}
710 }
711 
712 #endif
713 
714 /*
715  * sctp_stop_timers_for_shutdown() should be called
716  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
717  * state to make sure that all timers are stopped.
718  */
719 void
720 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
721 {
722 	struct sctp_association *asoc;
723 	struct sctp_nets *net;
724 
725 	asoc = &stcb->asoc;
726 
727 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
728 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
729 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
730 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
731 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
732 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
733 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
734 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
735 	}
736 }
737 
738 /*
739  * a list of sizes based on typical mtu's, used only if next hop size not
740  * returned.
741  */
/*
 * a list of sizes based on typical mtu's, used only if next hop size not
 * returned. Must stay sorted ascending for the lookups below.
 */
static uint32_t sctp_mtu_sizes[] = {
	68, 296, 508, 512, 544, 576, 1006, 1492, 1500,
	1536, 2002, 2048, 4352, 4464, 8166, 17914, 32000, 65535
};

/*
 * Return the largest MTU smaller than val. If there is no
 * entry, just return val.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t idx;
	const uint32_t limit = sizeof(sctp_mtu_sizes) / sizeof(uint32_t);

	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	idx = 1;
	while ((idx < limit) && (sctp_mtu_sizes[idx] < val)) {
		idx++;
	}
	return (sctp_mtu_sizes[idx - 1]);
}

/*
 * Return the smallest MTU larger than val. If there is no
 * entry, just return val.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	const uint32_t limit = sizeof(sctp_mtu_sizes) / sizeof(uint32_t);
	uint32_t idx;

	for (idx = 0; idx < limit; idx++) {
		if (sctp_mtu_sizes[idx] > val) {
			return (sctp_mtu_sizes[idx]);
		}
	}
	return (val);
}
800 
/*
 * Refill the endpoint's random_store by HMACing random_numbers with
 * the (incrementing) random_counter, and reset store_at to the start.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
819 
820 uint32_t
821 sctp_select_initial_TSN(struct sctp_pcb *inp)
822 {
823 	/*
824 	 * A true implementation should use random selection process to get
825 	 * the initial stream sequence number, using RFC1750 as a good
826 	 * guideline
827 	 */
828 	uint32_t x, *xp;
829 	uint8_t *p;
830 	int store_at, new_store;
831 
832 	if (inp->initial_sequence_debug != 0) {
833 		uint32_t ret;
834 
835 		ret = inp->initial_sequence_debug;
836 		inp->initial_sequence_debug++;
837 		return (ret);
838 	}
839 retry:
840 	store_at = inp->store_at;
841 	new_store = store_at + sizeof(uint32_t);
842 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
843 		new_store = 0;
844 	}
845 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
846 		goto retry;
847 	}
848 	if (new_store == 0) {
849 		/* Refill the random store */
850 		sctp_fill_random_store(inp);
851 	}
852 	p = &inp->random_store[store_at];
853 	xp = (uint32_t *) p;
854 	x = *xp;
855 	return (x);
856 }
857 
858 uint32_t
859 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
860 {
861 	uint32_t x;
862 	struct timeval now;
863 
864 	if (check) {
865 		(void)SCTP_GETTIME_TIMEVAL(&now);
866 	}
867 	for (;;) {
868 		x = sctp_select_initial_TSN(&inp->sctp_ep);
869 		if (x == 0) {
870 			/* we never use 0 */
871 			continue;
872 		}
873 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
874 			break;
875 		}
876 	}
877 	return (x);
878 }
879 
880 int
881 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
882     uint32_t override_tag, uint32_t vrf_id)
883 {
884 	struct sctp_association *asoc;
885 
886 	/*
887 	 * Anything set to zero is taken care of by the allocation routine's
888 	 * bzero
889 	 */
890 
891 	/*
892 	 * Up front select what scoping to apply on addresses I tell my peer
893 	 * Not sure what to do with these right now, we will need to come up
894 	 * with a way to set them. We may need to pass them through from the
895 	 * caller in the sctp_aloc_assoc() function.
896 	 */
897 	int i;
898 
899 	asoc = &stcb->asoc;
900 	/* init all variables to a known value. */
901 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
902 	asoc->max_burst = inp->sctp_ep.max_burst;
903 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
904 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
905 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
906 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
907 	asoc->ecn_allowed = inp->sctp_ecn_enable;
908 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
909 	asoc->sctp_cmt_pf = (uint8_t) 0;
910 	asoc->sctp_frag_point = inp->sctp_frag_point;
911 	asoc->sctp_features = inp->sctp_features;
912 	asoc->default_dscp = inp->sctp_ep.default_dscp;
913 #ifdef INET6
914 	if (inp->sctp_ep.default_flowlabel) {
915 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
916 	} else {
917 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
918 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
919 			asoc->default_flowlabel &= 0x000fffff;
920 			asoc->default_flowlabel |= 0x80000000;
921 		} else {
922 			asoc->default_flowlabel = 0;
923 		}
924 	}
925 #endif
926 	asoc->sb_send_resv = 0;
927 	if (override_tag) {
928 		asoc->my_vtag = override_tag;
929 	} else {
930 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
931 	}
932 	/* Get the nonce tags */
933 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
934 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
935 	asoc->vrf_id = vrf_id;
936 
937 #ifdef SCTP_ASOCLOG_OF_TSNS
938 	asoc->tsn_in_at = 0;
939 	asoc->tsn_out_at = 0;
940 	asoc->tsn_in_wrapped = 0;
941 	asoc->tsn_out_wrapped = 0;
942 	asoc->cumack_log_at = 0;
943 	asoc->cumack_log_atsnt = 0;
944 #endif
945 #ifdef SCTP_FS_SPEC_LOG
946 	asoc->fs_index = 0;
947 #endif
948 	asoc->refcnt = 0;
949 	asoc->assoc_up_sent = 0;
950 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
951 	    sctp_select_initial_TSN(&inp->sctp_ep);
952 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
953 	/* we are optimisitic here */
954 	asoc->peer_supports_pktdrop = 1;
955 	asoc->peer_supports_nat = 0;
956 	asoc->sent_queue_retran_cnt = 0;
957 
958 	/* for CMT */
959 	asoc->last_net_cmt_send_started = NULL;
960 
961 	/* This will need to be adjusted */
962 	asoc->last_acked_seq = asoc->init_seq_number - 1;
963 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
964 	asoc->asconf_seq_in = asoc->last_acked_seq;
965 
966 	/* here we are different, we hold the next one we expect */
967 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
968 
969 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
970 	asoc->initial_rto = inp->sctp_ep.initial_rto;
971 
972 	asoc->max_init_times = inp->sctp_ep.max_init_times;
973 	asoc->max_send_times = inp->sctp_ep.max_send_times;
974 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
975 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
976 	asoc->free_chunk_cnt = 0;
977 
978 	asoc->iam_blocking = 0;
979 	asoc->context = inp->sctp_context;
980 	asoc->local_strreset_support = inp->local_strreset_support;
981 	asoc->def_send = inp->def_send;
982 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
983 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
984 	asoc->pr_sctp_cnt = 0;
985 	asoc->total_output_queue_size = 0;
986 
987 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
988 		asoc->scope.ipv6_addr_legal = 1;
989 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
990 			asoc->scope.ipv4_addr_legal = 1;
991 		} else {
992 			asoc->scope.ipv4_addr_legal = 0;
993 		}
994 	} else {
995 		asoc->scope.ipv6_addr_legal = 0;
996 		asoc->scope.ipv4_addr_legal = 1;
997 	}
998 
999 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1000 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1001 
1002 	asoc->smallest_mtu = inp->sctp_frag_point;
1003 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1004 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1005 
1006 	asoc->locked_on_sending = NULL;
1007 	asoc->stream_locked_on = 0;
1008 	asoc->ecn_echo_cnt_onq = 0;
1009 	asoc->stream_locked = 0;
1010 
1011 	asoc->send_sack = 1;
1012 
1013 	LIST_INIT(&asoc->sctp_restricted_addrs);
1014 
1015 	TAILQ_INIT(&asoc->nets);
1016 	TAILQ_INIT(&asoc->pending_reply_queue);
1017 	TAILQ_INIT(&asoc->asconf_ack_sent);
1018 	/* Setup to fill the hb random cache at first HB */
1019 	asoc->hb_random_idx = 4;
1020 
1021 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1022 
1023 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1024 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1025 
1026 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1027 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1028 
1029 	/*
1030 	 * Now the stream parameters, here we allocate space for all streams
1031 	 * that we request by default.
1032 	 */
1033 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1034 	    inp->sctp_ep.pre_open_stream_count;
1035 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1036 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1037 	    SCTP_M_STRMO);
1038 	if (asoc->strmout == NULL) {
1039 		/* big trouble no memory */
1040 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1041 		return (ENOMEM);
1042 	}
1043 	for (i = 0; i < asoc->streamoutcnt; i++) {
1044 		/*
1045 		 * inbound side must be set to 0xffff, also NOTE when we get
1046 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1047 		 * count (streamoutcnt) but first check if we sent to any of
1048 		 * the upper streams that were dropped (if some were). Those
1049 		 * that were dropped must be notified to the upper layer as
1050 		 * failed to send.
1051 		 */
1052 		asoc->strmout[i].next_sequence_send = 0x0;
1053 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1054 		asoc->strmout[i].chunks_on_queues = 0;
1055 		asoc->strmout[i].stream_no = i;
1056 		asoc->strmout[i].last_msg_incomplete = 0;
1057 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1058 	}
1059 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1060 
1061 	/* Now the mapping array */
1062 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1063 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1064 	    SCTP_M_MAP);
1065 	if (asoc->mapping_array == NULL) {
1066 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1067 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1068 		return (ENOMEM);
1069 	}
1070 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1071 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1072 	    SCTP_M_MAP);
1073 	if (asoc->nr_mapping_array == NULL) {
1074 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1075 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1076 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1077 		return (ENOMEM);
1078 	}
1079 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1080 
1081 	/* Now the init of the other outqueues */
1082 	TAILQ_INIT(&asoc->free_chunks);
1083 	TAILQ_INIT(&asoc->control_send_queue);
1084 	TAILQ_INIT(&asoc->asconf_send_queue);
1085 	TAILQ_INIT(&asoc->send_queue);
1086 	TAILQ_INIT(&asoc->sent_queue);
1087 	TAILQ_INIT(&asoc->reasmqueue);
1088 	TAILQ_INIT(&asoc->resetHead);
1089 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1090 	TAILQ_INIT(&asoc->asconf_queue);
1091 	/* authentication fields */
1092 	asoc->authinfo.random = NULL;
1093 	asoc->authinfo.active_keyid = 0;
1094 	asoc->authinfo.assoc_key = NULL;
1095 	asoc->authinfo.assoc_keyid = 0;
1096 	asoc->authinfo.recv_key = NULL;
1097 	asoc->authinfo.recv_keyid = 0;
1098 	LIST_INIT(&asoc->shared_keys);
1099 	asoc->marked_retrans = 0;
1100 	asoc->port = inp->sctp_ep.port;
1101 	asoc->timoinit = 0;
1102 	asoc->timodata = 0;
1103 	asoc->timosack = 0;
1104 	asoc->timoshutdown = 0;
1105 	asoc->timoheartbeat = 0;
1106 	asoc->timocookie = 0;
1107 	asoc->timoshutdownack = 0;
1108 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1109 	asoc->discontinuity_time = asoc->start_time;
1110 	/*
1111 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1112 	 * freed later when the association is freed.
1113 	 */
1114 	return (0);
1115 }
1116 
1117 void
1118 sctp_print_mapping_array(struct sctp_association *asoc)
1119 {
1120 	unsigned int i, limit;
1121 
1122 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1123 	    asoc->mapping_array_size,
1124 	    asoc->mapping_array_base_tsn,
1125 	    asoc->cumulative_tsn,
1126 	    asoc->highest_tsn_inside_map,
1127 	    asoc->highest_tsn_inside_nr_map);
1128 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1129 		if (asoc->mapping_array[limit - 1] != 0) {
1130 			break;
1131 		}
1132 	}
1133 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1134 	for (i = 0; i < limit; i++) {
1135 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1136 	}
1137 	if (limit % 16)
1138 		SCTP_PRINTF("\n");
1139 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1140 		if (asoc->nr_mapping_array[limit - 1]) {
1141 			break;
1142 		}
1143 	}
1144 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1145 	for (i = 0; i < limit; i++) {
1146 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1147 	}
1148 	if (limit % 16)
1149 		SCTP_PRINTF("\n");
1150 }
1151 
1152 int
1153 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1154 {
1155 	/* mapping array needs to grow */
1156 	uint8_t *new_array1, *new_array2;
1157 	uint32_t new_size;
1158 
1159 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1160 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1161 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1162 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1163 		/* can't get more, forget it */
1164 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1165 		if (new_array1) {
1166 			SCTP_FREE(new_array1, SCTP_M_MAP);
1167 		}
1168 		if (new_array2) {
1169 			SCTP_FREE(new_array2, SCTP_M_MAP);
1170 		}
1171 		return (-1);
1172 	}
1173 	memset(new_array1, 0, new_size);
1174 	memset(new_array2, 0, new_size);
1175 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1176 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1177 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1178 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1179 	asoc->mapping_array = new_array1;
1180 	asoc->nr_mapping_array = new_array2;
1181 	asoc->mapping_array_size = new_size;
1182 	return (0);
1183 }
1184 
1185 
/*
 * Run one iterator to completion (or until it is externally stopped).
 *
 * Walks endpoints (inps) and, within each, their associations (stcbs),
 * invoking the iterator's callbacks:
 *   function_inp      - once per matching endpoint (may return non-zero
 *                       to skip that endpoint's associations),
 *   function_assoc    - once per matching association,
 *   function_inp_end  - after the last association of an endpoint,
 *   function_atend    - once when the whole iteration finishes.
 *
 * Lock order here is: INP_INFO (read) -> ITERATOR -> INP (read) -> TCB.
 * Every SCTP_ITERATOR_MAX_AT_ONCE associations the function drops and
 * reacquires the global locks to let other threads in; external control
 * flags (sctp_it_ctl.iterator_flags) are re-checked at that point and
 * may abort the current endpoint or the whole iteration.
 *
 * On completion the iterator structure itself is freed.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* Drop the reference taken when the iterator was queued. */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* On the first pass it->inp is already read-locked (above). */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/* Hold the stcb via refcnt while all locks are dropped. */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/* While unlocked someone may have asked us to stop. */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1333 
/*
 * Drain the global queue of pending iterators, running each via
 * sctp_iterator_work() in its originating vnet context.
 *
 * Caller must hold the iterator work-queue (WQ) lock; it is dropped
 * around each sctp_iterator_work() call and reacquired afterwards,
 * which is why the _SAFE list traversal is required (the current
 * element is removed and freed while the lock is not held).
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		sctp_it_ctl.cur_it = it;
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		/* sctp_iterator_work() frees "it" when done. */
		sctp_iterator_work(it);
		sctp_it_ctl.cur_it = NULL;
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
1357 
1358 
1359 static void
1360 sctp_handle_addr_wq(void)
1361 {
1362 	/* deal with the ADDR wq from the rtsock calls */
1363 	struct sctp_laddr *wi, *nwi;
1364 	struct sctp_asconf_iterator *asc;
1365 
1366 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1367 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1368 	if (asc == NULL) {
1369 		/* Try later, no memory */
1370 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1371 		    (struct sctp_inpcb *)NULL,
1372 		    (struct sctp_tcb *)NULL,
1373 		    (struct sctp_nets *)NULL);
1374 		return;
1375 	}
1376 	LIST_INIT(&asc->list_of_work);
1377 	asc->cnt = 0;
1378 
1379 	SCTP_WQ_ADDR_LOCK();
1380 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1381 		LIST_REMOVE(wi, sctp_nxt_addr);
1382 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1383 		asc->cnt++;
1384 	}
1385 	SCTP_WQ_ADDR_UNLOCK();
1386 
1387 	if (asc->cnt == 0) {
1388 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1389 	} else {
1390 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1391 		    sctp_asconf_iterator_stcb,
1392 		    NULL,	/* No ep end for boundall */
1393 		    SCTP_PCB_FLAGS_BOUNDALL,
1394 		    SCTP_PCB_ANY_FEATURES,
1395 		    SCTP_ASOC_ANY_STATE,
1396 		    (void *)asc, 0,
1397 		    sctp_asconf_iterator_end, NULL, 0);
1398 	}
1399 }
1400 
/*
 * Callout entry point for all SCTP timers.
 *
 * "t" is the struct sctp_timer embedded in the owning object; its
 * ep/tcb/net back-pointers identify the endpoint, association and
 * destination the timer belongs to (any of which may be NULL
 * depending on the timer type).
 *
 * The function first validates the timer (self pointer, type, and
 * that the referenced inp/stcb are still alive), taking an inp
 * reference and the TCB lock as needed, then dispatches to the
 * per-type handler.  Most handlers end by pushing queued chunks out
 * via sctp_chunk_output().  The out_decr/out_no_decr tails release
 * the references/locks taken here; handlers that destroy the stcb or
 * inp jump to out_no_decr with the pointer NULLed so nothing is
 * touched after the free.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	/* stopped_from breadcrumbs record where a stale timer bailed out */
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	/* save type now; tmr may be freed by a handler below */
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			/* socket is gone and this timer type doesn't apply */
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* hold the stcb until we have its lock */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			/* assoc is being torn down; only ASOCKILL may proceed */
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed SACK timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			/* re-arm HB for this destination and push output */
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* rotate the endpoint's cookie secret keys */
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* shutdown took too long: abort the association */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* time to free the association itself */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1840 
1841 void
1842 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1843     struct sctp_nets *net)
1844 {
1845 	uint32_t to_ticks;
1846 	struct sctp_timer *tmr;
1847 
1848 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1849 		return;
1850 
1851 	tmr = NULL;
1852 	if (stcb) {
1853 		SCTP_TCB_LOCK_ASSERT(stcb);
1854 	}
1855 	switch (t_type) {
1856 	case SCTP_TIMER_TYPE_ZERO_COPY:
1857 		tmr = &inp->sctp_ep.zero_copy_timer;
1858 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1859 		break;
1860 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1861 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1862 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1863 		break;
1864 	case SCTP_TIMER_TYPE_ADDR_WQ:
1865 		/* Only 1 tick away :-) */
1866 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1867 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1868 		break;
1869 	case SCTP_TIMER_TYPE_SEND:
1870 		/* Here we use the RTO timer */
1871 		{
1872 			int rto_val;
1873 
1874 			if ((stcb == NULL) || (net == NULL)) {
1875 				return;
1876 			}
1877 			tmr = &net->rxt_timer;
1878 			if (net->RTO == 0) {
1879 				rto_val = stcb->asoc.initial_rto;
1880 			} else {
1881 				rto_val = net->RTO;
1882 			}
1883 			to_ticks = MSEC_TO_TICKS(rto_val);
1884 		}
1885 		break;
1886 	case SCTP_TIMER_TYPE_INIT:
1887 		/*
1888 		 * Here we use the INIT timer default usually about 1
1889 		 * minute.
1890 		 */
1891 		if ((stcb == NULL) || (net == NULL)) {
1892 			return;
1893 		}
1894 		tmr = &net->rxt_timer;
1895 		if (net->RTO == 0) {
1896 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1897 		} else {
1898 			to_ticks = MSEC_TO_TICKS(net->RTO);
1899 		}
1900 		break;
1901 	case SCTP_TIMER_TYPE_RECV:
1902 		/*
1903 		 * Here we use the Delayed-Ack timer value from the inp
1904 		 * ususually about 200ms.
1905 		 */
1906 		if (stcb == NULL) {
1907 			return;
1908 		}
1909 		tmr = &stcb->asoc.dack_timer;
1910 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1911 		break;
1912 	case SCTP_TIMER_TYPE_SHUTDOWN:
1913 		/* Here we use the RTO of the destination. */
1914 		if ((stcb == NULL) || (net == NULL)) {
1915 			return;
1916 		}
1917 		if (net->RTO == 0) {
1918 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1919 		} else {
1920 			to_ticks = MSEC_TO_TICKS(net->RTO);
1921 		}
1922 		tmr = &net->rxt_timer;
1923 		break;
1924 	case SCTP_TIMER_TYPE_HEARTBEAT:
1925 		/*
1926 		 * the net is used here so that we can add in the RTO. Even
1927 		 * though we use a different timer. We also add the HB timer
1928 		 * PLUS a random jitter.
1929 		 */
1930 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
1931 			return;
1932 		} else {
1933 			uint32_t rndval;
1934 			uint32_t jitter;
1935 
1936 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1937 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1938 				return;
1939 			}
1940 			if (net->RTO == 0) {
1941 				to_ticks = stcb->asoc.initial_rto;
1942 			} else {
1943 				to_ticks = net->RTO;
1944 			}
1945 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1946 			jitter = rndval % to_ticks;
1947 			if (jitter >= (to_ticks >> 1)) {
1948 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1949 			} else {
1950 				to_ticks = to_ticks - jitter;
1951 			}
1952 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1953 			    !(net->dest_state & SCTP_ADDR_PF)) {
1954 				to_ticks += net->heart_beat_delay;
1955 			}
1956 			/*
1957 			 * Now we must convert the to_ticks that are now in
1958 			 * ms to ticks.
1959 			 */
1960 			to_ticks = MSEC_TO_TICKS(to_ticks);
1961 			tmr = &net->hb_timer;
1962 		}
1963 		break;
1964 	case SCTP_TIMER_TYPE_COOKIE:
1965 		/*
1966 		 * Here we can use the RTO timer from the network since one
1967 		 * RTT was compelete. If a retran happened then we will be
1968 		 * using the RTO initial value.
1969 		 */
1970 		if ((stcb == NULL) || (net == NULL)) {
1971 			return;
1972 		}
1973 		if (net->RTO == 0) {
1974 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1975 		} else {
1976 			to_ticks = MSEC_TO_TICKS(net->RTO);
1977 		}
1978 		tmr = &net->rxt_timer;
1979 		break;
1980 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1981 		/*
1982 		 * nothing needed but the endpoint here ususually about 60
1983 		 * minutes.
1984 		 */
1985 		if (inp == NULL) {
1986 			return;
1987 		}
1988 		tmr = &inp->sctp_ep.signature_change;
1989 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
1990 		break;
1991 	case SCTP_TIMER_TYPE_ASOCKILL:
1992 		if (stcb == NULL) {
1993 			return;
1994 		}
1995 		tmr = &stcb->asoc.strreset_timer;
1996 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
1997 		break;
1998 	case SCTP_TIMER_TYPE_INPKILL:
1999 		/*
2000 		 * The inp is setup to die. We re-use the signature_chage
2001 		 * timer since that has stopped and we are in the GONE
2002 		 * state.
2003 		 */
2004 		if (inp == NULL) {
2005 			return;
2006 		}
2007 		tmr = &inp->sctp_ep.signature_change;
2008 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2009 		break;
2010 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2011 		/*
2012 		 * Here we use the value found in the EP for PMTU ususually
2013 		 * about 10 minutes.
2014 		 */
2015 		if ((stcb == NULL) || (inp == NULL)) {
2016 			return;
2017 		}
2018 		if (net == NULL) {
2019 			return;
2020 		}
2021 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2022 			return;
2023 		}
2024 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2025 		tmr = &net->pmtu_timer;
2026 		break;
2027 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2028 		/* Here we use the RTO of the destination */
2029 		if ((stcb == NULL) || (net == NULL)) {
2030 			return;
2031 		}
2032 		if (net->RTO == 0) {
2033 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2034 		} else {
2035 			to_ticks = MSEC_TO_TICKS(net->RTO);
2036 		}
2037 		tmr = &net->rxt_timer;
2038 		break;
2039 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2040 		/*
2041 		 * Here we use the endpoints shutdown guard timer usually
2042 		 * about 3 minutes.
2043 		 */
2044 		if ((inp == NULL) || (stcb == NULL)) {
2045 			return;
2046 		}
2047 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2048 		tmr = &stcb->asoc.shut_guard_timer;
2049 		break;
2050 	case SCTP_TIMER_TYPE_STRRESET:
2051 		/*
2052 		 * Here the timer comes from the stcb but its value is from
2053 		 * the net's RTO.
2054 		 */
2055 		if ((stcb == NULL) || (net == NULL)) {
2056 			return;
2057 		}
2058 		if (net->RTO == 0) {
2059 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2060 		} else {
2061 			to_ticks = MSEC_TO_TICKS(net->RTO);
2062 		}
2063 		tmr = &stcb->asoc.strreset_timer;
2064 		break;
2065 	case SCTP_TIMER_TYPE_ASCONF:
2066 		/*
2067 		 * Here the timer comes from the stcb but its value is from
2068 		 * the net's RTO.
2069 		 */
2070 		if ((stcb == NULL) || (net == NULL)) {
2071 			return;
2072 		}
2073 		if (net->RTO == 0) {
2074 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2075 		} else {
2076 			to_ticks = MSEC_TO_TICKS(net->RTO);
2077 		}
2078 		tmr = &stcb->asoc.asconf_timer;
2079 		break;
2080 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2081 		if ((stcb == NULL) || (net != NULL)) {
2082 			return;
2083 		}
2084 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2085 		tmr = &stcb->asoc.delete_prim_timer;
2086 		break;
2087 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2088 		if (stcb == NULL) {
2089 			return;
2090 		}
2091 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2092 			/*
2093 			 * Really an error since stcb is NOT set to
2094 			 * autoclose
2095 			 */
2096 			return;
2097 		}
2098 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2099 		tmr = &stcb->asoc.autoclose_timer;
2100 		break;
2101 	default:
2102 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2103 		    __FUNCTION__, t_type);
2104 		return;
2105 		break;
2106 	}
2107 	if ((to_ticks <= 0) || (tmr == NULL)) {
2108 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2109 		    __FUNCTION__, t_type, to_ticks, (void *)tmr);
2110 		return;
2111 	}
2112 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2113 		/*
2114 		 * we do NOT allow you to have it already running. if it is
2115 		 * we leave the current one up unchanged
2116 		 */
2117 		return;
2118 	}
2119 	/* At this point we can proceed */
2120 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2121 		stcb->asoc.num_send_timers_up++;
2122 	}
2123 	tmr->stopped_from = 0;
2124 	tmr->type = t_type;
2125 	tmr->ep = (void *)inp;
2126 	tmr->tcb = (void *)stcb;
2127 	tmr->net = (void *)net;
2128 	tmr->self = (void *)tmr;
2129 	tmr->vnet = (void *)curvnet;
2130 	tmr->ticks = sctp_get_tick_count();
2131 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2132 	return;
2133 }
2134 
/*
 * Stop the timer of type t_type for the given endpoint/association/
 * destination.  'from' identifies the caller's location and is recorded
 * in tmr->stopped_from for post-mortem debugging.  Several timer types
 * share one sctp_timer slot; if the slot is currently running as a
 * different type, the timer is left untouched.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Every type except ADDR_WQ hangs off the endpoint (inp). */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Map the timer type to the sctp_timer slot it runs on. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Global address workqueue timer, not per-endpoint. */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the pending-SEND-timer count in sync (never below zero). */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2291 
2292 uint32_t
2293 sctp_calculate_len(struct mbuf *m)
2294 {
2295 	uint32_t tlen = 0;
2296 	struct mbuf *at;
2297 
2298 	at = m;
2299 	while (at) {
2300 		tlen += SCTP_BUF_LEN(at);
2301 		at = SCTP_BUF_NEXT(at);
2302 	}
2303 	return (tlen);
2304 }
2305 
2306 void
2307 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2308     struct sctp_association *asoc, uint32_t mtu)
2309 {
2310 	/*
2311 	 * Reset the P-MTU size on this association, this involves changing
2312 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2313 	 * allow the DF flag to be cleared.
2314 	 */
2315 	struct sctp_tmit_chunk *chk;
2316 	unsigned int eff_mtu, ovh;
2317 
2318 	asoc->smallest_mtu = mtu;
2319 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2320 		ovh = SCTP_MIN_OVERHEAD;
2321 	} else {
2322 		ovh = SCTP_MIN_V4_OVERHEAD;
2323 	}
2324 	eff_mtu = mtu - ovh;
2325 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2326 		if (chk->send_size > eff_mtu) {
2327 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2328 		}
2329 	}
2330 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2331 		if (chk->send_size > eff_mtu) {
2332 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2333 		}
2334 	}
2335 }
2336 
2337 
2338 /*
2339  * given an association and starting time of the current RTT period return
2340  * RTO in number of msecs net should point to the current network
2341  */
2342 
2343 uint32_t
2344 sctp_calculate_rto(struct sctp_tcb *stcb,
2345     struct sctp_association *asoc,
2346     struct sctp_nets *net,
2347     struct timeval *told,
2348     int safe, int rtt_from_sack)
2349 {
2350 	/*-
2351 	 * given an association and the starting time of the current RTT
2352 	 * period (in value1/value2) return RTO in number of msecs.
2353 	 */
2354 	int32_t rtt;		/* RTT in ms */
2355 	uint32_t new_rto;
2356 	int first_measure = 0;
2357 	struct timeval now, then, *old;
2358 
2359 	/* Copy it out for sparc64 */
2360 	if (safe == sctp_align_unsafe_makecopy) {
2361 		old = &then;
2362 		memcpy(&then, told, sizeof(struct timeval));
2363 	} else if (safe == sctp_align_safe_nocopy) {
2364 		old = told;
2365 	} else {
2366 		/* error */
2367 		SCTP_PRINTF("Huh, bad rto calc call\n");
2368 		return (0);
2369 	}
2370 	/************************/
2371 	/* 1. calculate new RTT */
2372 	/************************/
2373 	/* get the current time */
2374 	if (stcb->asoc.use_precise_time) {
2375 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2376 	} else {
2377 		(void)SCTP_GETTIME_TIMEVAL(&now);
2378 	}
2379 	timevalsub(&now, old);
2380 	/* store the current RTT in us */
2381 	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
2382 	        (uint64_t) now.tv_usec;
2383 
2384 	/* computer rtt in ms */
2385 	rtt = net->rtt / 1000;
2386 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2387 		/*
2388 		 * Tell the CC module that a new update has just occurred
2389 		 * from a sack
2390 		 */
2391 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2392 	}
2393 	/*
2394 	 * Do we need to determine the lan? We do this only on sacks i.e.
2395 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2396 	 */
2397 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2398 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2399 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2400 			net->lan_type = SCTP_LAN_INTERNET;
2401 		} else {
2402 			net->lan_type = SCTP_LAN_LOCAL;
2403 		}
2404 	}
2405 	/***************************/
2406 	/* 2. update RTTVAR & SRTT */
2407 	/***************************/
2408 	/*-
2409 	 * Compute the scaled average lastsa and the
2410 	 * scaled variance lastsv as described in van Jacobson
2411 	 * Paper "Congestion Avoidance and Control", Annex A.
2412 	 *
2413 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2414 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2415 	 */
2416 	if (net->RTO_measured) {
2417 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2418 		net->lastsa += rtt;
2419 		if (rtt < 0) {
2420 			rtt = -rtt;
2421 		}
2422 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2423 		net->lastsv += rtt;
2424 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2425 			rto_logging(net, SCTP_LOG_RTTVAR);
2426 		}
2427 	} else {
2428 		/* First RTO measurment */
2429 		net->RTO_measured = 1;
2430 		first_measure = 1;
2431 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2432 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2433 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2434 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2435 		}
2436 	}
2437 	if (net->lastsv == 0) {
2438 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2439 	}
2440 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2441 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2442 	    (stcb->asoc.sat_network_lockout == 0)) {
2443 		stcb->asoc.sat_network = 1;
2444 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2445 		stcb->asoc.sat_network = 0;
2446 		stcb->asoc.sat_network_lockout = 1;
2447 	}
2448 	/* bound it, per C6/C7 in Section 5.3.1 */
2449 	if (new_rto < stcb->asoc.minrto) {
2450 		new_rto = stcb->asoc.minrto;
2451 	}
2452 	if (new_rto > stcb->asoc.maxrto) {
2453 		new_rto = stcb->asoc.maxrto;
2454 	}
2455 	/* we are now returning the RTO */
2456 	return (new_rto);
2457 }
2458 
2459 /*
2460  * return a pointer to a contiguous piece of data from the given mbuf chain
2461  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2462  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2463  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2464  */
2465 caddr_t
2466 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2467 {
2468 	uint32_t count;
2469 	uint8_t *ptr;
2470 
2471 	ptr = in_ptr;
2472 	if ((off < 0) || (len <= 0))
2473 		return (NULL);
2474 
2475 	/* find the desired start location */
2476 	while ((m != NULL) && (off > 0)) {
2477 		if (off < SCTP_BUF_LEN(m))
2478 			break;
2479 		off -= SCTP_BUF_LEN(m);
2480 		m = SCTP_BUF_NEXT(m);
2481 	}
2482 	if (m == NULL)
2483 		return (NULL);
2484 
2485 	/* is the current mbuf large enough (eg. contiguous)? */
2486 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2487 		return (mtod(m, caddr_t)+off);
2488 	} else {
2489 		/* else, it spans more than one mbuf, so save a temp copy... */
2490 		while ((m != NULL) && (len > 0)) {
2491 			count = min(SCTP_BUF_LEN(m) - off, len);
2492 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2493 			len -= count;
2494 			ptr += count;
2495 			off = 0;
2496 			m = SCTP_BUF_NEXT(m);
2497 		}
2498 		if ((m == NULL) && (len > 0))
2499 			return (NULL);
2500 		else
2501 			return ((caddr_t)in_ptr);
2502 	}
2503 }
2504 
2505 
2506 
2507 struct sctp_paramhdr *
2508 sctp_get_next_param(struct mbuf *m,
2509     int offset,
2510     struct sctp_paramhdr *pull,
2511     int pull_limit)
2512 {
2513 	/* This just provides a typed signature to Peter's Pull routine */
2514 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2515 	    (uint8_t *) pull));
2516 }
2517 
2518 
2519 int
2520 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2521 {
2522 	/*
2523 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2524 	 * padlen is > 3 this routine will fail.
2525 	 */
2526 	uint8_t *dp;
2527 	int i;
2528 
2529 	if (padlen > 3) {
2530 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2531 		return (ENOBUFS);
2532 	}
2533 	if (padlen <= M_TRAILINGSPACE(m)) {
2534 		/*
2535 		 * The easy way. We hope the majority of the time we hit
2536 		 * here :)
2537 		 */
2538 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2539 		SCTP_BUF_LEN(m) += padlen;
2540 	} else {
2541 		/* Hard way we must grow the mbuf */
2542 		struct mbuf *tmp;
2543 
2544 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2545 		if (tmp == NULL) {
2546 			/* Out of space GAK! we are in big trouble. */
2547 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2548 			return (ENOBUFS);
2549 		}
2550 		/* setup and insert in middle */
2551 		SCTP_BUF_LEN(tmp) = padlen;
2552 		SCTP_BUF_NEXT(tmp) = NULL;
2553 		SCTP_BUF_NEXT(m) = tmp;
2554 		dp = mtod(tmp, uint8_t *);
2555 	}
2556 	/* zero out the pad */
2557 	for (i = 0; i < padlen; i++) {
2558 		*dp = 0;
2559 		dp++;
2560 	}
2561 	return (0);
2562 }
2563 
2564 int
2565 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2566 {
2567 	/* find the last mbuf in chain and pad it */
2568 	struct mbuf *m_at;
2569 
2570 	if (last_mbuf) {
2571 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2572 	} else {
2573 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2574 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2575 				return (sctp_add_pad_tombuf(m_at, padval));
2576 			}
2577 		}
2578 	}
2579 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2580 	return (EFAULT);
2581 }
2582 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification (state, error, optional
 * ABORT payload) to the socket, set so_error for 1-to-1 style sockets
 * when the association is lost, and wake any sleepers.  'so_locked'
 * tells the Apple/lock-testing builds whether the socket lock is
 * already held.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	size_t notif_len, abort_len;
	unsigned int i;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
		} else {
			abort_len = 0;
		}
		/* Up events carry a feature list; down events carry the ABORT. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value (drop the optional payload). */
			notif_len = sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				/* Could not notify; still handle so_error below. */
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		/* Only fill the optional part if the larger mbuf was obtained. */
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				/* One feature byte per supported extension. */
				i = 0;
				if (stcb->asoc.peer_supports_prsctp) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.peer_supports_auth) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.peer_supports_asconf) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.peer_supports_strreset) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				/* Append the raw ABORT chunk for the application. */
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			/* not that we need this */
			control->tail_mbuf = m_notify;
			control->spec_flags = M_NOTIFICATION;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up and error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			/* Peer-initiated teardown: map state to errno. */
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			/* Local teardown (e.g. retransmission limit hit). */
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* Hold a ref across the lock dance so the tcb can't go away. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore_locked(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2730 
2731 static void
2732 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2733     struct sockaddr *sa, uint32_t error)
2734 {
2735 	struct mbuf *m_notify;
2736 	struct sctp_paddr_change *spc;
2737 	struct sctp_queued_to_read *control;
2738 
2739 	if ((stcb == NULL) ||
2740 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2741 		/* event not enabled */
2742 		return;
2743 	}
2744 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2745 	if (m_notify == NULL)
2746 		return;
2747 	SCTP_BUF_LEN(m_notify) = 0;
2748 	spc = mtod(m_notify, struct sctp_paddr_change *);
2749 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2750 	spc->spc_flags = 0;
2751 	spc->spc_length = sizeof(struct sctp_paddr_change);
2752 	switch (sa->sa_family) {
2753 #ifdef INET
2754 	case AF_INET:
2755 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2756 		break;
2757 #endif
2758 #ifdef INET6
2759 	case AF_INET6:
2760 		{
2761 			struct sockaddr_in6 *sin6;
2762 
2763 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2764 
2765 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2766 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2767 				if (sin6->sin6_scope_id == 0) {
2768 					/* recover scope_id for user */
2769 					(void)sa6_recoverscope(sin6);
2770 				} else {
2771 					/* clear embedded scope_id for user */
2772 					in6_clearscope(&sin6->sin6_addr);
2773 				}
2774 			}
2775 			break;
2776 		}
2777 #endif
2778 	default:
2779 		/* TSNH */
2780 		break;
2781 	}
2782 	spc->spc_state = state;
2783 	spc->spc_error = error;
2784 	spc->spc_assoc_id = sctp_get_associd(stcb);
2785 
2786 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2787 	SCTP_BUF_NEXT(m_notify) = NULL;
2788 
2789 	/* append to socket */
2790 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2791 	    0, 0, stcb->asoc.context, 0, 0, 0,
2792 	    m_notify);
2793 	if (control == NULL) {
2794 		/* no memory */
2795 		sctp_m_freem(m_notify);
2796 		return;
2797 	}
2798 	control->length = SCTP_BUF_LEN(m_notify);
2799 	control->spec_flags = M_NOTIFICATION;
2800 	/* not that we need this */
2801 	control->tail_mbuf = m_notify;
2802 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2803 	    control,
2804 	    &stcb->sctp_socket->so_rcv, 1,
2805 	    SCTP_READ_LOCK_NOT_HELD,
2806 	    SCTP_SO_NOT_LOCKED);
2807 }
2808 
2809 
/*
 * Deliver a send-failure notification for a chunk that could not be
 * (fully) delivered.  Emits either the new sctp_send_failed_event or
 * the legacy sctp_send_failed format, depending on which event the
 * application subscribed to.  The chunk's data mbuf is stolen and
 * chained onto the notification (chk->data is set to NULL).
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* The new-style event takes precedence if subscribed. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		/*
		 * Reported length covers the user payload: chunk size minus
		 * the data-chunk header (assumes send_size includes that
		 * header -- see the m_adj() trim below).
		 */
		length += chk->send_size;
		length -= sizeof(struct sctp_data_chunk);
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		/* Legacy sctp_send_failed layout. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		length += chk->send_size;
		length -= sizeof(struct sctp_data_chunk);
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2923 
2924 
2925 static void
2926 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
2927     struct sctp_stream_queue_pending *sp, int so_locked
2928 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2929     SCTP_UNUSED
2930 #endif
2931 )
2932 {
2933 	struct mbuf *m_notify;
2934 	struct sctp_send_failed *ssf;
2935 	struct sctp_send_failed_event *ssfe;
2936 	struct sctp_queued_to_read *control;
2937 	int length;
2938 
2939 	if ((stcb == NULL) ||
2940 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2941 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2942 		/* event not enabled */
2943 		return;
2944 	}
2945 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2946 		length = sizeof(struct sctp_send_failed_event);
2947 	} else {
2948 		length = sizeof(struct sctp_send_failed);
2949 	}
2950 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
2951 	if (m_notify == NULL) {
2952 		/* no space left */
2953 		return;
2954 	}
2955 	SCTP_BUF_LEN(m_notify) = 0;
2956 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2957 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2958 		memset(ssfe, 0, length);
2959 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2960 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2961 		length += sp->length;
2962 		ssfe->ssfe_length = length;
2963 		ssfe->ssfe_error = error;
2964 		/* not exactly what the user sent in, but should be close :) */
2965 		ssfe->ssfe_info.snd_sid = sp->stream;
2966 		if (sp->some_taken) {
2967 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
2968 		} else {
2969 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
2970 		}
2971 		ssfe->ssfe_info.snd_ppid = sp->ppid;
2972 		ssfe->ssfe_info.snd_context = sp->context;
2973 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2974 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2975 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
2976 	} else {
2977 		ssf = mtod(m_notify, struct sctp_send_failed *);
2978 		memset(ssf, 0, length);
2979 		ssf->ssf_type = SCTP_SEND_FAILED;
2980 		ssf->ssf_flags = SCTP_DATA_UNSENT;
2981 		length += sp->length;
2982 		ssf->ssf_length = length;
2983 		ssf->ssf_error = error;
2984 		/* not exactly what the user sent in, but should be close :) */
2985 		ssf->ssf_info.sinfo_stream = sp->stream;
2986 		ssf->ssf_info.sinfo_ssn = 0;
2987 		if (sp->some_taken) {
2988 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
2989 		} else {
2990 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
2991 		}
2992 		ssf->ssf_info.sinfo_ppid = sp->ppid;
2993 		ssf->ssf_info.sinfo_context = sp->context;
2994 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2995 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
2996 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2997 	}
2998 	SCTP_BUF_NEXT(m_notify) = sp->data;
2999 
3000 	/* Steal off the mbuf */
3001 	sp->data = NULL;
3002 	/*
3003 	 * For this case, we check the actual socket buffer, since the assoc
3004 	 * is going away we don't want to overfill the socket buffer for a
3005 	 * non-reader
3006 	 */
3007 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3008 		sctp_m_freem(m_notify);
3009 		return;
3010 	}
3011 	/* append to socket */
3012 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3013 	    0, 0, stcb->asoc.context, 0, 0, 0,
3014 	    m_notify);
3015 	if (control == NULL) {
3016 		/* no memory */
3017 		sctp_m_freem(m_notify);
3018 		return;
3019 	}
3020 	control->spec_flags = M_NOTIFICATION;
3021 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3022 	    control,
3023 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3024 }
3025 
3026 
3027 
3028 static void
3029 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3030 {
3031 	struct mbuf *m_notify;
3032 	struct sctp_adaptation_event *sai;
3033 	struct sctp_queued_to_read *control;
3034 
3035 	if ((stcb == NULL) ||
3036 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3037 		/* event not enabled */
3038 		return;
3039 	}
3040 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3041 	if (m_notify == NULL)
3042 		/* no space left */
3043 		return;
3044 	SCTP_BUF_LEN(m_notify) = 0;
3045 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3046 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3047 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3048 	sai->sai_flags = 0;
3049 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3050 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3051 	sai->sai_assoc_id = sctp_get_associd(stcb);
3052 
3053 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3054 	SCTP_BUF_NEXT(m_notify) = NULL;
3055 
3056 	/* append to socket */
3057 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3058 	    0, 0, stcb->asoc.context, 0, 0, 0,
3059 	    m_notify);
3060 	if (control == NULL) {
3061 		/* no memory */
3062 		sctp_m_freem(m_notify);
3063 		return;
3064 	}
3065 	control->length = SCTP_BUF_LEN(m_notify);
3066 	control->spec_flags = M_NOTIFICATION;
3067 	/* not that we need this */
3068 	control->tail_mbuf = m_notify;
3069 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3070 	    control,
3071 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3072 }
3073 
3074 /* This always must be called with the read-queue LOCKED in the INP */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Queue an SCTP_PARTIAL_DELIVERY_EVENT to the application.  Unlike
	 * the other notification helpers, this one does NOT call
	 * sctp_add_to_readq(): the caller already holds the INP read-queue
	 * lock (see the comment above the function), so the entry is
	 * charged to the socket buffer and linked onto the read queue by
	 * hand, then the reader is woken directly.
	 *
	 * 'val' packs the stream id in the upper 16 bits and the sequence
	 * number in the lower 16 bits.
	 */
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* No reader will ever consume this; don't bother queueing. */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* Unpack stream id / sequence number from the caller's 'val'. */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	/*
	 * length is reset to 0 here and re-accumulated atomically below
	 * once the mbuf has actually been charged to the socket buffer.
	 */
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* Charge the notification against the receive socket buffer. */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	/* Insert right after the message being partially delivered. */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Apple lock order: the socket lock must be taken before
		 * the TCB lock, so drop/retake around acquiring it while a
		 * refcount pins the association.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* Socket vanished while unlocked; give up. */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3169 
/*
 * Handle the socket-level side effects of a completed SHUTDOWN and, if
 * subscribed, queue an SCTP_SHUTDOWN_EVENT notification.  For one-to-one
 * style (TCP model) sockets the socket is first marked as unable to send.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Apple lock order: take the socket lock before the TCB
		 * lock; a refcount pins the association while unlocked.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* Socket closed while we were unlocked; bail out. */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3238 
3239 static void
3240 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3241     int so_locked
3242 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3243     SCTP_UNUSED
3244 #endif
3245 )
3246 {
3247 	struct mbuf *m_notify;
3248 	struct sctp_sender_dry_event *event;
3249 	struct sctp_queued_to_read *control;
3250 
3251 	if ((stcb == NULL) ||
3252 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3253 		/* event not enabled */
3254 		return;
3255 	}
3256 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3257 	if (m_notify == NULL) {
3258 		/* no space left */
3259 		return;
3260 	}
3261 	SCTP_BUF_LEN(m_notify) = 0;
3262 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3263 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3264 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3265 	event->sender_dry_flags = 0;
3266 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3267 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3268 
3269 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3270 	SCTP_BUF_NEXT(m_notify) = NULL;
3271 
3272 	/* append to socket */
3273 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3274 	    0, 0, stcb->asoc.context, 0, 0, 0,
3275 	    m_notify);
3276 	if (control == NULL) {
3277 		/* no memory */
3278 		sctp_m_freem(m_notify);
3279 		return;
3280 	}
3281 	control->length = SCTP_BUF_LEN(m_notify);
3282 	control->spec_flags = M_NOTIFICATION;
3283 	/* not that we need this */
3284 	control->tail_mbuf = m_notify;
3285 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3286 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3287 }
3288 
3289 
3290 void
3291 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3292 {
3293 	struct mbuf *m_notify;
3294 	struct sctp_queued_to_read *control;
3295 	struct sctp_stream_change_event *stradd;
3296 
3297 	if ((stcb == NULL) ||
3298 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3299 		/* event not enabled */
3300 		return;
3301 	}
3302 	if ((stcb->asoc.peer_req_out) && flag) {
3303 		/* Peer made the request, don't tell the local user */
3304 		stcb->asoc.peer_req_out = 0;
3305 		return;
3306 	}
3307 	stcb->asoc.peer_req_out = 0;
3308 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3309 	if (m_notify == NULL)
3310 		/* no space left */
3311 		return;
3312 	SCTP_BUF_LEN(m_notify) = 0;
3313 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3314 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3315 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3316 	stradd->strchange_flags = flag;
3317 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3318 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3319 	stradd->strchange_instrms = numberin;
3320 	stradd->strchange_outstrms = numberout;
3321 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3322 	SCTP_BUF_NEXT(m_notify) = NULL;
3323 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3324 		/* no space */
3325 		sctp_m_freem(m_notify);
3326 		return;
3327 	}
3328 	/* append to socket */
3329 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3330 	    0, 0, stcb->asoc.context, 0, 0, 0,
3331 	    m_notify);
3332 	if (control == NULL) {
3333 		/* no memory */
3334 		sctp_m_freem(m_notify);
3335 		return;
3336 	}
3337 	control->spec_flags = M_NOTIFICATION;
3338 	control->length = SCTP_BUF_LEN(m_notify);
3339 	/* not that we need this */
3340 	control->tail_mbuf = m_notify;
3341 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3342 	    control,
3343 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3344 }
3345 
3346 void
3347 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3348 {
3349 	struct mbuf *m_notify;
3350 	struct sctp_queued_to_read *control;
3351 	struct sctp_assoc_reset_event *strasoc;
3352 
3353 	if ((stcb == NULL) ||
3354 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3355 		/* event not enabled */
3356 		return;
3357 	}
3358 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3359 	if (m_notify == NULL)
3360 		/* no space left */
3361 		return;
3362 	SCTP_BUF_LEN(m_notify) = 0;
3363 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3364 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3365 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3366 	strasoc->assocreset_flags = flag;
3367 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3368 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3369 	strasoc->assocreset_local_tsn = sending_tsn;
3370 	strasoc->assocreset_remote_tsn = recv_tsn;
3371 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3372 	SCTP_BUF_NEXT(m_notify) = NULL;
3373 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3374 		/* no space */
3375 		sctp_m_freem(m_notify);
3376 		return;
3377 	}
3378 	/* append to socket */
3379 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3380 	    0, 0, stcb->asoc.context, 0, 0, 0,
3381 	    m_notify);
3382 	if (control == NULL) {
3383 		/* no memory */
3384 		sctp_m_freem(m_notify);
3385 		return;
3386 	}
3387 	control->spec_flags = M_NOTIFICATION;
3388 	control->length = SCTP_BUF_LEN(m_notify);
3389 	/* not that we need this */
3390 	control->tail_mbuf = m_notify;
3391 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3392 	    control,
3393 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3394 }
3395 
3396 
3397 
3398 static void
3399 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3400     int number_entries, uint16_t * list, int flag)
3401 {
3402 	struct mbuf *m_notify;
3403 	struct sctp_queued_to_read *control;
3404 	struct sctp_stream_reset_event *strreset;
3405 	int len;
3406 
3407 	if ((stcb == NULL) ||
3408 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3409 		/* event not enabled */
3410 		return;
3411 	}
3412 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3413 	if (m_notify == NULL)
3414 		/* no space left */
3415 		return;
3416 	SCTP_BUF_LEN(m_notify) = 0;
3417 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3418 	if (len > M_TRAILINGSPACE(m_notify)) {
3419 		/* never enough room */
3420 		sctp_m_freem(m_notify);
3421 		return;
3422 	}
3423 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3424 	memset(strreset, 0, len);
3425 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3426 	strreset->strreset_flags = flag;
3427 	strreset->strreset_length = len;
3428 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3429 	if (number_entries) {
3430 		int i;
3431 
3432 		for (i = 0; i < number_entries; i++) {
3433 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3434 		}
3435 	}
3436 	SCTP_BUF_LEN(m_notify) = len;
3437 	SCTP_BUF_NEXT(m_notify) = NULL;
3438 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3439 		/* no space */
3440 		sctp_m_freem(m_notify);
3441 		return;
3442 	}
3443 	/* append to socket */
3444 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3445 	    0, 0, stcb->asoc.context, 0, 0, 0,
3446 	    m_notify);
3447 	if (control == NULL) {
3448 		/* no memory */
3449 		sctp_m_freem(m_notify);
3450 		return;
3451 	}
3452 	control->spec_flags = M_NOTIFICATION;
3453 	control->length = SCTP_BUF_LEN(m_notify);
3454 	/* not that we need this */
3455 	control->tail_mbuf = m_notify;
3456 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3457 	    control,
3458 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3459 }
3460 
3461 
3462 static void
3463 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3464 {
3465 	struct mbuf *m_notify;
3466 	struct sctp_remote_error *sre;
3467 	struct sctp_queued_to_read *control;
3468 	size_t notif_len, chunk_len;
3469 
3470 	if ((stcb == NULL) ||
3471 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3472 		return;
3473 	}
3474 	if (chunk != NULL) {
3475 		chunk_len = ntohs(chunk->ch.chunk_length);
3476 	} else {
3477 		chunk_len = 0;
3478 	}
3479 	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
3480 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3481 	if (m_notify == NULL) {
3482 		/* Retry with smaller value. */
3483 		notif_len = sizeof(struct sctp_remote_error);
3484 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3485 		if (m_notify == NULL) {
3486 			return;
3487 		}
3488 	}
3489 	SCTP_BUF_NEXT(m_notify) = NULL;
3490 	sre = mtod(m_notify, struct sctp_remote_error *);
3491 	sre->sre_type = SCTP_REMOTE_ERROR;
3492 	sre->sre_flags = 0;
3493 	sre->sre_length = sizeof(struct sctp_remote_error);
3494 	sre->sre_error = error;
3495 	sre->sre_assoc_id = sctp_get_associd(stcb);
3496 	if (notif_len > sizeof(struct sctp_remote_error)) {
3497 		memcpy(sre->sre_data, chunk, chunk_len);
3498 		sre->sre_length += chunk_len;
3499 	}
3500 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3501 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3502 	    0, 0, stcb->asoc.context, 0, 0, 0,
3503 	    m_notify);
3504 	if (control != NULL) {
3505 		control->length = SCTP_BUF_LEN(m_notify);
3506 		/* not that we need this */
3507 		control->tail_mbuf = m_notify;
3508 		control->spec_flags = M_NOTIFICATION;
3509 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3510 		    control,
3511 		    &stcb->sctp_socket->so_rcv, 1,
3512 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3513 	} else {
3514 		sctp_m_freem(m_notify);
3515 	}
3516 }
3517 
3518 
/*
 * Central ULP (upper-layer protocol) notification dispatcher: translate
 * an internal SCTP_NOTIFY_* code into the appropriate socket-level event
 * by calling the matching sctp_notify_*() helper.  'data' is interpreted
 * per notification (a net, a chunk, a pending send, a stream list, ...).
 * Events are suppressed entirely when the socket is gone or can no
 * longer be read from.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* Receive side shut down; nobody will read the event. */
		return;
	}
	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is delivered at most once per association. */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* Data that never left the stream queues. */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
		/* NB: constant name is misspelled in the header; keep it. */
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* In front states a failed setup is CANT_STR_ASSOC. */
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		/* 'error' carries the entry count, 'data' the stream list. */
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* 'data' smuggles the key id in the pointer value. */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __FUNCTION__, notification, notification);
		break;
	}			/* end switch */
}
3698 
/*
 * Drain every outbound queue of the association (sent queue, send queue,
 * and each stream's pending queue), notifying the ULP of each failed
 * send and releasing the chunks/mbufs.  Used when the association is
 * being torn down.  'holds_lock' indicates whether the caller already
 * owns the TCB send lock.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		/* Socket gone; nobody to report to. */
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		/* NR-acked chunks were already uncounted from the stream. */
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* This was data the peer never acknowledged. */
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* Chunkified but never put on the wire. */
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				/* Data still in the stream queue: never chunkified. */
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3810 
3811 void
3812 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3813     struct sctp_abort_chunk *abort, int so_locked
3814 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3815     SCTP_UNUSED
3816 #endif
3817 )
3818 {
3819 	if (stcb == NULL) {
3820 		return;
3821 	}
3822 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3823 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3824 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3825 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3826 	}
3827 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3828 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3829 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3830 		return;
3831 	}
3832 	/* Tell them we lost the asoc */
3833 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3834 	if (from_peer) {
3835 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3836 	} else {
3837 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3838 	}
3839 }
3840 
/*
 * Abort an association: notify the ULP, send an ABORT chunk to the peer
 * (built against the incoming packet m), and free the TCB.  stcb may be
 * NULL, in which case only the ABORT packet is sent (out-of-the-blue
 * handling).  When stcb is non-NULL it is freed before returning.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    use_mflowid, mflowid,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: the socket lock must be taken before
		 * the TCB lock, so drop the TCB lock (holding a refcount
		 * so the assoc cannot vanish), take the socket lock, and
		 * re-acquire the TCB lock.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* Only established associations count against the gauge. */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3889 
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the association's inbound and outbound TSN history
 * rings.  Each ring (in_tsnlog/out_tsnlog) holds SCTP_TSN_LOG_SIZE
 * entries; once the "wrapped" flag is set, the oldest entries run from
 * the current write index to the end of the ring, followed by the
 * entries from index 0 up to the write index.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
	/* Fixed typo: this was "#ifdef NOSIY_PRINTS", which could never be
	 * enabled under the intended NOISY_PRINTS spelling (dead code). */
#ifdef NOISY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* oldest entries: from the write index to the end of the ring */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		/* newest entries: from the start of the ring to the write index */
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3953 
/*
 * Abort an association from the local side: notify the ULP (unless the
 * socket is already gone), send an ABORT chunk carrying op_err to the
 * peer, and free the TCB.  If stcb is NULL and the socket is gone with
 * no associations left, the inp itself is freed instead.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				/* last assoc gone; finish tearing down the PCB */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* Only established associations count against the gauge. */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: the socket lock must be taken before the TCB
	 * lock, so drop/re-take the TCB lock under a refcount so the
	 * association cannot disappear in between.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4014 
4015 void
4016 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4017     struct sockaddr *src, struct sockaddr *dst,
4018     struct sctphdr *sh, struct sctp_inpcb *inp,
4019     struct mbuf *cause,
4020     uint8_t use_mflowid, uint32_t mflowid,
4021     uint32_t vrf_id, uint16_t port)
4022 {
4023 	struct sctp_chunkhdr *ch, chunk_buf;
4024 	unsigned int chk_length;
4025 	int contains_init_chunk;
4026 
4027 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4028 	/* Generate a TO address for future reference */
4029 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4030 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4031 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4032 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4033 		}
4034 	}
4035 	contains_init_chunk = 0;
4036 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4037 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4038 	while (ch != NULL) {
4039 		chk_length = ntohs(ch->chunk_length);
4040 		if (chk_length < sizeof(*ch)) {
4041 			/* break to abort land */
4042 			break;
4043 		}
4044 		switch (ch->chunk_type) {
4045 		case SCTP_INIT:
4046 			contains_init_chunk = 1;
4047 			break;
4048 		case SCTP_PACKET_DROPPED:
4049 			/* we don't respond to pkt-dropped */
4050 			return;
4051 		case SCTP_ABORT_ASSOCIATION:
4052 			/* we don't respond with an ABORT to an ABORT */
4053 			return;
4054 		case SCTP_SHUTDOWN_COMPLETE:
4055 			/*
4056 			 * we ignore it since we are not waiting for it and
4057 			 * peer is gone
4058 			 */
4059 			return;
4060 		case SCTP_SHUTDOWN_ACK:
4061 			sctp_send_shutdown_complete2(src, dst, sh,
4062 			    use_mflowid, mflowid,
4063 			    vrf_id, port);
4064 			return;
4065 		default:
4066 			break;
4067 		}
4068 		offset += SCTP_SIZE32(chk_length);
4069 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4070 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4071 	}
4072 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4073 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4074 	    (contains_init_chunk == 0))) {
4075 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4076 		    use_mflowid, mflowid,
4077 		    vrf_id, port);
4078 	}
4079 }
4080 
4081 /*
4082  * check the inbound datagram to make sure there is not an abort inside it,
4083  * if there is return 1, else return 0.
4084  */
4085 int
4086 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4087 {
4088 	struct sctp_chunkhdr *ch;
4089 	struct sctp_init_chunk *init_chk, chunk_buf;
4090 	int offset;
4091 	unsigned int chk_length;
4092 
4093 	offset = iphlen + sizeof(struct sctphdr);
4094 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4095 	    (uint8_t *) & chunk_buf);
4096 	while (ch != NULL) {
4097 		chk_length = ntohs(ch->chunk_length);
4098 		if (chk_length < sizeof(*ch)) {
4099 			/* packet is probably corrupt */
4100 			break;
4101 		}
4102 		/* we seem to be ok, is it an abort? */
4103 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4104 			/* yep, tell them */
4105 			return (1);
4106 		}
4107 		if (ch->chunk_type == SCTP_INITIATION) {
4108 			/* need to update the Vtag */
4109 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4110 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4111 			if (init_chk != NULL) {
4112 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4113 			}
4114 		}
4115 		/* Nope, move to the next chunk */
4116 		offset += SCTP_SIZE32(chk_length);
4117 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4118 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4119 	}
4120 	return (0);
4121 }
4122 
4123 /*
4124  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4125  * set (i.e. it's 0) so, create this function to compare link local scopes
4126  */
4127 #ifdef INET6
4128 uint32_t
4129 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4130 {
4131 	struct sockaddr_in6 a, b;
4132 
4133 	/* save copies */
4134 	a = *addr1;
4135 	b = *addr2;
4136 
4137 	if (a.sin6_scope_id == 0)
4138 		if (sa6_recoverscope(&a)) {
4139 			/* can't get scope, so can't match */
4140 			return (0);
4141 		}
4142 	if (b.sin6_scope_id == 0)
4143 		if (sa6_recoverscope(&b)) {
4144 			/* can't get scope, so can't match */
4145 			return (0);
4146 		}
4147 	if (a.sin6_scope_id != b.sin6_scope_id)
4148 		return (0);
4149 
4150 	return (1);
4151 }
4152 
4153 /*
4154  * returns a sockaddr_in6 with embedded scope recovered and removed
4155  */
4156 struct sockaddr_in6 *
4157 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4158 {
4159 	/* check and strip embedded scope junk */
4160 	if (addr->sin6_family == AF_INET6) {
4161 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4162 			if (addr->sin6_scope_id == 0) {
4163 				*store = *addr;
4164 				if (!sa6_recoverscope(store)) {
4165 					/* use the recovered scope */
4166 					addr = store;
4167 				}
4168 			} else {
4169 				/* else, return the original "to" addr */
4170 				in6_clearscope(&addr->sin6_addr);
4171 			}
4172 		}
4173 	}
4174 	return (addr);
4175 }
4176 
4177 #endif
4178 
4179 /*
4180  * are the two addresses the same?  currently a "scopeless" check returns: 1
4181  * if same, 0 if not
4182  */
4183 int
4184 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4185 {
4186 
4187 	/* must be valid */
4188 	if (sa1 == NULL || sa2 == NULL)
4189 		return (0);
4190 
4191 	/* must be the same family */
4192 	if (sa1->sa_family != sa2->sa_family)
4193 		return (0);
4194 
4195 	switch (sa1->sa_family) {
4196 #ifdef INET6
4197 	case AF_INET6:
4198 		{
4199 			/* IPv6 addresses */
4200 			struct sockaddr_in6 *sin6_1, *sin6_2;
4201 
4202 			sin6_1 = (struct sockaddr_in6 *)sa1;
4203 			sin6_2 = (struct sockaddr_in6 *)sa2;
4204 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4205 			    sin6_2));
4206 		}
4207 #endif
4208 #ifdef INET
4209 	case AF_INET:
4210 		{
4211 			/* IPv4 addresses */
4212 			struct sockaddr_in *sin_1, *sin_2;
4213 
4214 			sin_1 = (struct sockaddr_in *)sa1;
4215 			sin_2 = (struct sockaddr_in *)sa2;
4216 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4217 		}
4218 #endif
4219 	default:
4220 		/* we don't do these... */
4221 		return (0);
4222 	}
4223 }
4224 
4225 void
4226 sctp_print_address(struct sockaddr *sa)
4227 {
4228 #ifdef INET6
4229 	char ip6buf[INET6_ADDRSTRLEN];
4230 
4231 #endif
4232 
4233 	switch (sa->sa_family) {
4234 #ifdef INET6
4235 	case AF_INET6:
4236 		{
4237 			struct sockaddr_in6 *sin6;
4238 
4239 			sin6 = (struct sockaddr_in6 *)sa;
4240 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4241 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4242 			    ntohs(sin6->sin6_port),
4243 			    sin6->sin6_scope_id);
4244 			break;
4245 		}
4246 #endif
4247 #ifdef INET
4248 	case AF_INET:
4249 		{
4250 			struct sockaddr_in *sin;
4251 			unsigned char *p;
4252 
4253 			sin = (struct sockaddr_in *)sa;
4254 			p = (unsigned char *)&sin->sin_addr;
4255 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4256 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4257 			break;
4258 		}
4259 #endif
4260 	default:
4261 		SCTP_PRINTF("?\n");
4262 		break;
4263 	}
4264 }
4265 
/*
 * Move every queued read-control structure that belongs to stcb from
 * old_inp's read queue to new_inp's (used by peeloff/accept).  Socket
 * buffer accounting is debited on the old socket and credited on the
 * new one.  Fails silently (data stranded) if the old receive buffer
 * cannot be sb-locked.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;	/* staging queue between the two inps */
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* keep concurrent readers off the old receive buffer */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit the old socket's buffer accounting, mbuf by mbuf */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit the new socket's buffer accounting, mbuf by mbuf */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4341 
/*
 * Queue a completed read-control structure (and its mbuf chain) on
 * inp's read queue, charging the data to socket buffer sb so that
 * select/poll see it.  Zero-length mbufs are pruned from the chain;
 * if everything collapses away the control is freed instead of being
 * queued.  Wakes up (or posts a zero-copy event to) the socket unless
 * the reader side is gone.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* reader side is gone; drop the data instead of queueing it */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		/* only user data (not notifications) counts toward recv stats */
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		/* charge this mbuf to the socket buffer accounting */
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		/* the message is complete; the reader may consume it all */
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Lock-order dance: take the socket lock
				 * before re-taking the TCB lock, holding a
				 * refcount so the assoc cannot be freed.
				 */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* socket vanished while unlocked */
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4467 
4468 
/*
 * Append mbuf chain m to an existing read-control structure (partial
 * delivery API, or reassembly append).  Zero-length mbufs are pruned;
 * the control's length and (if sb is non-NULL) the socket buffer
 * accounting are updated, and the socket is woken.  Returns 0 on
 * success, -1 when the control is missing, already complete, or m is
 * empty.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* reader side is gone; silently succeed (data is moot) */
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with no length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			/* PDAPI case: charge the socket buffer accounting too */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * Lock-order dance: socket lock before TCB lock,
			 * under a refcount so the assoc cannot be freed.
			 */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4618 
4619 
4620 
4621 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4622  *************ALTERNATE ROUTING CODE
4623  */
4624 
4625 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4626  *************ALTERNATE ROUTING CODE
4627  */
4628 
4629 struct mbuf *
4630 sctp_generate_cause(uint16_t code, char *info)
4631 {
4632 	struct mbuf *m;
4633 	struct sctp_gen_error_cause *cause;
4634 	size_t info_len, len;
4635 
4636 	if ((code == 0) || (info == NULL)) {
4637 		return (NULL);
4638 	}
4639 	info_len = strlen(info);
4640 	len = sizeof(struct sctp_paramhdr) + info_len;
4641 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4642 	if (m != NULL) {
4643 		SCTP_BUF_LEN(m) = len;
4644 		cause = mtod(m, struct sctp_gen_error_cause *);
4645 		cause->code = htons(code);
4646 		cause->length = htons((uint16_t) len);
4647 		memcpy(cause->info, info, info_len);
4648 	}
4649 	return (m);
4650 }
4651 
4652 struct mbuf *
4653 sctp_generate_no_user_data_cause(uint32_t tsn)
4654 {
4655 	struct mbuf *m;
4656 	struct sctp_error_no_user_data *no_user_data_cause;
4657 	size_t len;
4658 
4659 	len = sizeof(struct sctp_error_no_user_data);
4660 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4661 	if (m != NULL) {
4662 		SCTP_BUF_LEN(m) = len;
4663 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4664 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4665 		no_user_data_cause->cause.length = htons((uint16_t) len);
4666 		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4667 	}
4668 	return (m);
4669 }
4670 
#ifdef SCTP_MBCNT_LOGGING
/*
 * Release the book-kept output-queue space held by chunk tp1 and, for
 * 1-to-1 style (TCP-pool or TCP-type) sockets, credit it back to the
 * send buffer.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	struct socket *so;

	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* never let the queue-size accounting underflow */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}
	so = stcb->sctp_socket;
	if (so && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))) {
		/* likewise, clamp the send-buffer credit at zero */
		if (so->so_snd.sb_cc >= tp1->book_size) {
			so->so_snd.sb_cc -= tp1->book_size;
		} else {
			so->so_snd.sb_cc = 0;
		}
	}
}

#endif
4705 
4706 int
4707 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4708     uint8_t sent, int so_locked
4709 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4710     SCTP_UNUSED
4711 #endif
4712 )
4713 {
4714 	struct sctp_stream_out *strq;
4715 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4716 	struct sctp_stream_queue_pending *sp;
4717 	uint16_t stream = 0, seq = 0;
4718 	uint8_t foundeom = 0;
4719 	int ret_sz = 0;
4720 	int notdone;
4721 	int do_wakeup_routine = 0;
4722 
4723 	stream = tp1->rec.data.stream_number;
4724 	seq = tp1->rec.data.stream_seq;
4725 	do {
4726 		ret_sz += tp1->book_size;
4727 		if (tp1->data != NULL) {
4728 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4729 				sctp_flight_size_decrease(tp1);
4730 				sctp_total_flight_decrease(stcb, tp1);
4731 			}
4732 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4733 			stcb->asoc.peers_rwnd += tp1->send_size;
4734 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4735 			if (sent) {
4736 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4737 			} else {
4738 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4739 			}
4740 			if (tp1->data) {
4741 				sctp_m_freem(tp1->data);
4742 				tp1->data = NULL;
4743 			}
4744 			do_wakeup_routine = 1;
4745 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4746 				stcb->asoc.sent_queue_cnt_removeable--;
4747 			}
4748 		}
4749 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4750 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4751 		    SCTP_DATA_NOT_FRAG) {
4752 			/* not frag'ed we ae done   */
4753 			notdone = 0;
4754 			foundeom = 1;
4755 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4756 			/* end of frag, we are done */
4757 			notdone = 0;
4758 			foundeom = 1;
4759 		} else {
4760 			/*
4761 			 * Its a begin or middle piece, we must mark all of
4762 			 * it
4763 			 */
4764 			notdone = 1;
4765 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4766 		}
4767 	} while (tp1 && notdone);
4768 	if (foundeom == 0) {
4769 		/*
4770 		 * The multi-part message was scattered across the send and
4771 		 * sent queue.
4772 		 */
4773 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4774 			if ((tp1->rec.data.stream_number != stream) ||
4775 			    (tp1->rec.data.stream_seq != seq)) {
4776 				break;
4777 			}
4778 			/*
4779 			 * save to chk in case we have some on stream out
4780 			 * queue. If so and we have an un-transmitted one we
4781 			 * don't have to fudge the TSN.
4782 			 */
4783 			chk = tp1;
4784 			ret_sz += tp1->book_size;
4785 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4786 			if (sent) {
4787 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4788 			} else {
4789 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4790 			}
4791 			if (tp1->data) {
4792 				sctp_m_freem(tp1->data);
4793 				tp1->data = NULL;
4794 			}
4795 			/* No flight involved here book the size to 0 */
4796 			tp1->book_size = 0;
4797 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4798 				foundeom = 1;
4799 			}
4800 			do_wakeup_routine = 1;
4801 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4802 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4803 			/*
4804 			 * on to the sent queue so we can wait for it to be
4805 			 * passed by.
4806 			 */
4807 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4808 			    sctp_next);
4809 			stcb->asoc.send_queue_cnt--;
4810 			stcb->asoc.sent_queue_cnt++;
4811 		}
4812 	}
4813 	if (foundeom == 0) {
4814 		/*
4815 		 * Still no eom found. That means there is stuff left on the
4816 		 * stream out queue.. yuck.
4817 		 */
4818 		SCTP_TCB_SEND_LOCK(stcb);
4819 		strq = &stcb->asoc.strmout[stream];
4820 		sp = TAILQ_FIRST(&strq->outqueue);
4821 		if (sp != NULL) {
4822 			sp->discard_rest = 1;
4823 			/*
4824 			 * We may need to put a chunk on the queue that
4825 			 * holds the TSN that would have been sent with the
4826 			 * LAST bit.
4827 			 */
4828 			if (chk == NULL) {
4829 				/* Yep, we have to */
4830 				sctp_alloc_a_chunk(stcb, chk);
4831 				if (chk == NULL) {
4832 					/*
4833 					 * we are hosed. All we can do is
4834 					 * nothing.. which will cause an
4835 					 * abort if the peer is paying
4836 					 * attention.
4837 					 */
4838 					goto oh_well;
4839 				}
4840 				memset(chk, 0, sizeof(*chk));
4841 				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4842 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4843 				chk->asoc = &stcb->asoc;
4844 				chk->rec.data.stream_seq = strq->next_sequence_send;
4845 				chk->rec.data.stream_number = sp->stream;
4846 				chk->rec.data.payloadtype = sp->ppid;
4847 				chk->rec.data.context = sp->context;
4848 				chk->flags = sp->act_flags;
4849 				if (sp->net)
4850 					chk->whoTo = sp->net;
4851 				else
4852 					chk->whoTo = stcb->asoc.primary_destination;
4853 				atomic_add_int(&chk->whoTo->ref_count, 1);
4854 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4855 				stcb->asoc.pr_sctp_cnt++;
4856 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4857 				stcb->asoc.sent_queue_cnt++;
4858 				stcb->asoc.pr_sctp_cnt++;
4859 			} else {
4860 				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4861 			}
4862 			strq->next_sequence_send++;
4863 	oh_well:
4864 			if (sp->data) {
4865 				/*
4866 				 * Pull any data to free up the SB and allow
4867 				 * sender to "add more" while we will throw
4868 				 * away :-)
4869 				 */
4870 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4871 				ret_sz += sp->length;
4872 				do_wakeup_routine = 1;
4873 				sp->some_taken = 1;
4874 				sctp_m_freem(sp->data);
4875 				sp->data = NULL;
4876 				sp->tail_mbuf = NULL;
4877 				sp->length = 0;
4878 			}
4879 		}
4880 		SCTP_TCB_SEND_UNLOCK(stcb);
4881 	}
4882 	if (do_wakeup_routine) {
4883 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4884 		struct socket *so;
4885 
4886 		so = SCTP_INP_SO(stcb->sctp_ep);
4887 		if (!so_locked) {
4888 			atomic_add_int(&stcb->asoc.refcnt, 1);
4889 			SCTP_TCB_UNLOCK(stcb);
4890 			SCTP_SOCKET_LOCK(so, 1);
4891 			SCTP_TCB_LOCK(stcb);
4892 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4893 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4894 				/* assoc was freed while we were unlocked */
4895 				SCTP_SOCKET_UNLOCK(so, 1);
4896 				return (ret_sz);
4897 			}
4898 		}
4899 #endif
4900 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4901 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4902 		if (!so_locked) {
4903 			SCTP_SOCKET_UNLOCK(so, 1);
4904 		}
4905 #endif
4906 	}
4907 	return (ret_sz);
4908 }
4909 
4910 /*
4911  * checks to see if the given address, sa, is one that is currently known by
4912  * the kernel note: can't distinguish the same address on multiple interfaces
4913  * and doesn't handle multiple addresses with different zone/scope id's note:
4914  * ifa_ifwithaddr() compares the entire sockaddr struct
4915  */
4916 struct sctp_ifa *
4917 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4918     int holds_lock)
4919 {
4920 	struct sctp_laddr *laddr;
4921 
4922 	if (holds_lock == 0) {
4923 		SCTP_INP_RLOCK(inp);
4924 	}
4925 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4926 		if (laddr->ifa == NULL)
4927 			continue;
4928 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4929 			continue;
4930 #ifdef INET
4931 		if (addr->sa_family == AF_INET) {
4932 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4933 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4934 				/* found him. */
4935 				if (holds_lock == 0) {
4936 					SCTP_INP_RUNLOCK(inp);
4937 				}
4938 				return (laddr->ifa);
4939 				break;
4940 			}
4941 		}
4942 #endif
4943 #ifdef INET6
4944 		if (addr->sa_family == AF_INET6) {
4945 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4946 			    &laddr->ifa->address.sin6)) {
4947 				/* found him. */
4948 				if (holds_lock == 0) {
4949 					SCTP_INP_RUNLOCK(inp);
4950 				}
4951 				return (laddr->ifa);
4952 				break;
4953 			}
4954 		}
4955 #endif
4956 	}
4957 	if (holds_lock == 0) {
4958 		SCTP_INP_RUNLOCK(inp);
4959 	}
4960 	return (NULL);
4961 }
4962 
/*
 * Fold the address portion of an AF_INET or AF_INET6 sockaddr into a
 * 32-bit hash value; any other (or unsupported) family hashes to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			uint32_t v4bits;

			/* 32-bit IPv4 address xor-folded with its top half. */
			v4bits = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
			return (v4bits ^ (v4bits >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *a6;
			uint32_t folded;

			/* Sum the four 32-bit words, then xor-fold. */
			a6 = (struct sockaddr_in6 *)addr;
			folded = a6->sin6_addr.s6_addr32[0] +
			    a6->sin6_addr.s6_addr32[1] +
			    a6->sin6_addr.s6_addr32[2] +
			    a6->sin6_addr.s6_addr32[3];
			return (folded ^ (folded >> 16));
		}
#endif
	default:
		return (0);
	}
}
4996 
/*
 * Look up addr in the global, per-VRF hashed address table and return the
 * matching sctp_ifa, or NULL if the VRF is unknown or the address is not in
 * the table.  If holds_lock is 0 the global address read lock is taken (and
 * released) here; otherwise the caller must already hold it.
 */
struct sctp_ifa *
sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
{
	struct sctp_ifa *sctp_ifap;
	struct sctp_vrf *vrf;
	struct sctp_ifalist *hash_head;
	uint32_t hash_of_addr;

	if (holds_lock == 0)
		SCTP_IPI_ADDR_RLOCK();

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		/* Shared bail-out: drop the lock (if taken here) and fail. */
stage_right:
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();
		return (NULL);
	}
	hash_of_addr = sctp_get_ifa_hash_val(addr);

	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
	/*
	 * Defensive check only: hash_head is the address of an array slot,
	 * so it should never actually be NULL.
	 */
	if (hash_head == NULL) {
		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
		sctp_print_address(addr);
		SCTP_PRINTF("No such bucket for address\n");
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();

		return (NULL);
	}
	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
		/*
		 * Defensive check against list corruption; LIST_FOREACH
		 * normally never yields a NULL element.
		 */
		if (sctp_ifap == NULL) {
#ifdef INVARIANTS
			panic("Huh LIST_FOREACH corrupt");
			goto stage_right;
#else
			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
			goto stage_right;
#endif
		}
		/* Families must match before comparing the address bits. */
		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
			continue;
#ifdef INET
		if (addr->sa_family == AF_INET) {
			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
			    sctp_ifap->address.sin.sin_addr.s_addr) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				break;
			}
		}
#endif
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
			    &sctp_ifap->address.sin6)) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				break;
			}
		}
#endif
	}
	if (holds_lock == 0)
		SCTP_IPI_ADDR_RUNLOCK();
	return (NULL);
}
5070 
/*
 * Called after the user has consumed data from the receive socket buffer.
 * Decide whether the receive window has opened up enough since the last
 * report (by at least rwnd_req bytes) to warrant sending a window-update
 * SACK right away; otherwise just accumulate the freed byte count for a
 * later call.  *freed_so_far is the number of bytes freed by the caller
 * since the last call and is consumed (reset to 0) here.  hold_rlock is
 * non-zero when the caller holds the endpoint's read-queue lock, which is
 * dropped around the SACK/output path and re-taken before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Keep the association from being freed out from under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		/* Socket is gone (or going); nothing to report. */
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed bytes into the running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window grew enough: send a window-update SACK now. */
		if (hold_rlock) {
			/* Drop the read lock before taking the TCB lock. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-acquire the read lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Release the reference taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5152 
5153 int
5154 sctp_sorecvmsg(struct socket *so,
5155     struct uio *uio,
5156     struct mbuf **mp,
5157     struct sockaddr *from,
5158     int fromlen,
5159     int *msg_flags,
5160     struct sctp_sndrcvinfo *sinfo,
5161     int filling_sinfo)
5162 {
5163 	/*
5164 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5165 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5166 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5167 	 * On the way out we may send out any combination of:
5168 	 * MSG_NOTIFICATION MSG_EOR
5169 	 *
5170 	 */
5171 	struct sctp_inpcb *inp = NULL;
5172 	int my_len = 0;
5173 	int cp_len = 0, error = 0;
5174 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5175 	struct mbuf *m = NULL;
5176 	struct sctp_tcb *stcb = NULL;
5177 	int wakeup_read_socket = 0;
5178 	int freecnt_applied = 0;
5179 	int out_flags = 0, in_flags = 0;
5180 	int block_allowed = 1;
5181 	uint32_t freed_so_far = 0;
5182 	uint32_t copied_so_far = 0;
5183 	int in_eeor_mode = 0;
5184 	int no_rcv_needed = 0;
5185 	uint32_t rwnd_req = 0;
5186 	int hold_sblock = 0;
5187 	int hold_rlock = 0;
5188 	int slen = 0;
5189 	uint32_t held_length = 0;
5190 	int sockbuf_lock = 0;
5191 
5192 	if (uio == NULL) {
5193 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5194 		return (EINVAL);
5195 	}
5196 	if (msg_flags) {
5197 		in_flags = *msg_flags;
5198 		if (in_flags & MSG_PEEK)
5199 			SCTP_STAT_INCR(sctps_read_peeks);
5200 	} else {
5201 		in_flags = 0;
5202 	}
5203 	slen = uio->uio_resid;
5204 
5205 	/* Pull in and set up our int flags */
5206 	if (in_flags & MSG_OOB) {
5207 		/* Out of band's NOT supported */
5208 		return (EOPNOTSUPP);
5209 	}
5210 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5211 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5212 		return (EINVAL);
5213 	}
5214 	if ((in_flags & (MSG_DONTWAIT
5215 	    | MSG_NBIO
5216 	    )) ||
5217 	    SCTP_SO_IS_NBIO(so)) {
5218 		block_allowed = 0;
5219 	}
5220 	/* setup the endpoint */
5221 	inp = (struct sctp_inpcb *)so->so_pcb;
5222 	if (inp == NULL) {
5223 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5224 		return (EFAULT);
5225 	}
5226 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5227 	/* Must be at least a MTU's worth */
5228 	if (rwnd_req < SCTP_MIN_RWND)
5229 		rwnd_req = SCTP_MIN_RWND;
5230 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5231 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5232 		sctp_misc_ints(SCTP_SORECV_ENTER,
5233 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5234 	}
5235 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5236 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5237 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5238 	}
5239 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5240 	if (error) {
5241 		goto release_unlocked;
5242 	}
5243 	sockbuf_lock = 1;
5244 restart:
5245 
5246 
5247 restart_nosblocks:
5248 	if (hold_sblock == 0) {
5249 		SOCKBUF_LOCK(&so->so_rcv);
5250 		hold_sblock = 1;
5251 	}
5252 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5253 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5254 		goto out;
5255 	}
5256 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5257 		if (so->so_error) {
5258 			error = so->so_error;
5259 			if ((in_flags & MSG_PEEK) == 0)
5260 				so->so_error = 0;
5261 			goto out;
5262 		} else {
5263 			if (so->so_rcv.sb_cc == 0) {
5264 				/* indicate EOF */
5265 				error = 0;
5266 				goto out;
5267 			}
5268 		}
5269 	}
5270 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5271 		/* we need to wait for data */
5272 		if ((so->so_rcv.sb_cc == 0) &&
5273 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5274 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5275 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5276 				/*
5277 				 * For active open side clear flags for
5278 				 * re-use passive open is blocked by
5279 				 * connect.
5280 				 */
5281 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5282 					/*
5283 					 * You were aborted, passive side
5284 					 * always hits here
5285 					 */
5286 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5287 					error = ECONNRESET;
5288 				}
5289 				so->so_state &= ~(SS_ISCONNECTING |
5290 				    SS_ISDISCONNECTING |
5291 				    SS_ISCONFIRMING |
5292 				    SS_ISCONNECTED);
5293 				if (error == 0) {
5294 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5295 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5296 						error = ENOTCONN;
5297 					}
5298 				}
5299 				goto out;
5300 			}
5301 		}
5302 		error = sbwait(&so->so_rcv);
5303 		if (error) {
5304 			goto out;
5305 		}
5306 		held_length = 0;
5307 		goto restart_nosblocks;
5308 	} else if (so->so_rcv.sb_cc == 0) {
5309 		if (so->so_error) {
5310 			error = so->so_error;
5311 			if ((in_flags & MSG_PEEK) == 0)
5312 				so->so_error = 0;
5313 		} else {
5314 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5315 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5316 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5317 					/*
5318 					 * For active open side clear flags
5319 					 * for re-use passive open is
5320 					 * blocked by connect.
5321 					 */
5322 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5323 						/*
5324 						 * You were aborted, passive
5325 						 * side always hits here
5326 						 */
5327 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5328 						error = ECONNRESET;
5329 					}
5330 					so->so_state &= ~(SS_ISCONNECTING |
5331 					    SS_ISDISCONNECTING |
5332 					    SS_ISCONFIRMING |
5333 					    SS_ISCONNECTED);
5334 					if (error == 0) {
5335 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5336 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5337 							error = ENOTCONN;
5338 						}
5339 					}
5340 					goto out;
5341 				}
5342 			}
5343 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5344 			error = EWOULDBLOCK;
5345 		}
5346 		goto out;
5347 	}
5348 	if (hold_sblock == 1) {
5349 		SOCKBUF_UNLOCK(&so->so_rcv);
5350 		hold_sblock = 0;
5351 	}
5352 	/* we possibly have data we can read */
5353 	/* sa_ignore FREED_MEMORY */
5354 	control = TAILQ_FIRST(&inp->read_queue);
5355 	if (control == NULL) {
5356 		/*
5357 		 * This could be happening since the appender did the
5358 		 * increment but as not yet did the tailq insert onto the
5359 		 * read_queue
5360 		 */
5361 		if (hold_rlock == 0) {
5362 			SCTP_INP_READ_LOCK(inp);
5363 		}
5364 		control = TAILQ_FIRST(&inp->read_queue);
5365 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5366 #ifdef INVARIANTS
5367 			panic("Huh, its non zero and nothing on control?");
5368 #endif
5369 			so->so_rcv.sb_cc = 0;
5370 		}
5371 		SCTP_INP_READ_UNLOCK(inp);
5372 		hold_rlock = 0;
5373 		goto restart;
5374 	}
5375 	if ((control->length == 0) &&
5376 	    (control->do_not_ref_stcb)) {
5377 		/*
5378 		 * Clean up code for freeing assoc that left behind a
5379 		 * pdapi.. maybe a peer in EEOR that just closed after
5380 		 * sending and never indicated a EOR.
5381 		 */
5382 		if (hold_rlock == 0) {
5383 			hold_rlock = 1;
5384 			SCTP_INP_READ_LOCK(inp);
5385 		}
5386 		control->held_length = 0;
5387 		if (control->data) {
5388 			/* Hmm there is data here .. fix */
5389 			struct mbuf *m_tmp;
5390 			int cnt = 0;
5391 
5392 			m_tmp = control->data;
5393 			while (m_tmp) {
5394 				cnt += SCTP_BUF_LEN(m_tmp);
5395 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5396 					control->tail_mbuf = m_tmp;
5397 					control->end_added = 1;
5398 				}
5399 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5400 			}
5401 			control->length = cnt;
5402 		} else {
5403 			/* remove it */
5404 			TAILQ_REMOVE(&inp->read_queue, control, next);
5405 			/* Add back any hiddend data */
5406 			sctp_free_remote_addr(control->whoFrom);
5407 			sctp_free_a_readq(stcb, control);
5408 		}
5409 		if (hold_rlock) {
5410 			hold_rlock = 0;
5411 			SCTP_INP_READ_UNLOCK(inp);
5412 		}
5413 		goto restart;
5414 	}
5415 	if ((control->length == 0) &&
5416 	    (control->end_added == 1)) {
5417 		/*
5418 		 * Do we also need to check for (control->pdapi_aborted ==
5419 		 * 1)?
5420 		 */
5421 		if (hold_rlock == 0) {
5422 			hold_rlock = 1;
5423 			SCTP_INP_READ_LOCK(inp);
5424 		}
5425 		TAILQ_REMOVE(&inp->read_queue, control, next);
5426 		if (control->data) {
5427 #ifdef INVARIANTS
5428 			panic("control->data not null but control->length == 0");
5429 #else
5430 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5431 			sctp_m_freem(control->data);
5432 			control->data = NULL;
5433 #endif
5434 		}
5435 		if (control->aux_data) {
5436 			sctp_m_free(control->aux_data);
5437 			control->aux_data = NULL;
5438 		}
5439 		sctp_free_remote_addr(control->whoFrom);
5440 		sctp_free_a_readq(stcb, control);
5441 		if (hold_rlock) {
5442 			hold_rlock = 0;
5443 			SCTP_INP_READ_UNLOCK(inp);
5444 		}
5445 		goto restart;
5446 	}
5447 	if (control->length == 0) {
5448 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5449 		    (filling_sinfo)) {
5450 			/* find a more suitable one then this */
5451 			ctl = TAILQ_NEXT(control, next);
5452 			while (ctl) {
5453 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5454 				    (ctl->some_taken ||
5455 				    (ctl->spec_flags & M_NOTIFICATION) ||
5456 				    ((ctl->do_not_ref_stcb == 0) &&
5457 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5458 				    ) {
5459 					/*-
5460 					 * If we have a different TCB next, and there is data
5461 					 * present. If we have already taken some (pdapi), OR we can
5462 					 * ref the tcb and no delivery as started on this stream, we
5463 					 * take it. Note we allow a notification on a different
5464 					 * assoc to be delivered..
5465 					 */
5466 					control = ctl;
5467 					goto found_one;
5468 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5469 					    (ctl->length) &&
5470 					    ((ctl->some_taken) ||
5471 					    ((ctl->do_not_ref_stcb == 0) &&
5472 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5473 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5474 					/*-
5475 					 * If we have the same tcb, and there is data present, and we
5476 					 * have the strm interleave feature present. Then if we have
5477 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5478 					 * not started a delivery for this stream, we can take it.
5479 					 * Note we do NOT allow a notificaiton on the same assoc to
5480 					 * be delivered.
5481 					 */
5482 					control = ctl;
5483 					goto found_one;
5484 				}
5485 				ctl = TAILQ_NEXT(ctl, next);
5486 			}
5487 		}
5488 		/*
5489 		 * if we reach here, not suitable replacement is available
5490 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5491 		 * into the our held count, and its time to sleep again.
5492 		 */
5493 		held_length = so->so_rcv.sb_cc;
5494 		control->held_length = so->so_rcv.sb_cc;
5495 		goto restart;
5496 	}
5497 	/* Clear the held length since there is something to read */
5498 	control->held_length = 0;
5499 	if (hold_rlock) {
5500 		SCTP_INP_READ_UNLOCK(inp);
5501 		hold_rlock = 0;
5502 	}
5503 found_one:
5504 	/*
5505 	 * If we reach here, control has a some data for us to read off.
5506 	 * Note that stcb COULD be NULL.
5507 	 */
5508 	control->some_taken++;
5509 	if (hold_sblock) {
5510 		SOCKBUF_UNLOCK(&so->so_rcv);
5511 		hold_sblock = 0;
5512 	}
5513 	stcb = control->stcb;
5514 	if (stcb) {
5515 		if ((control->do_not_ref_stcb == 0) &&
5516 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5517 			if (freecnt_applied == 0)
5518 				stcb = NULL;
5519 		} else if (control->do_not_ref_stcb == 0) {
5520 			/* you can't free it on me please */
5521 			/*
5522 			 * The lock on the socket buffer protects us so the
5523 			 * free code will stop. But since we used the
5524 			 * socketbuf lock and the sender uses the tcb_lock
5525 			 * to increment, we need to use the atomic add to
5526 			 * the refcnt
5527 			 */
5528 			if (freecnt_applied) {
5529 #ifdef INVARIANTS
5530 				panic("refcnt already incremented");
5531 #else
5532 				SCTP_PRINTF("refcnt already incremented?\n");
5533 #endif
5534 			} else {
5535 				atomic_add_int(&stcb->asoc.refcnt, 1);
5536 				freecnt_applied = 1;
5537 			}
5538 			/*
5539 			 * Setup to remember how much we have not yet told
5540 			 * the peer our rwnd has opened up. Note we grab the
5541 			 * value from the tcb from last time. Note too that
5542 			 * sack sending clears this when a sack is sent,
5543 			 * which is fine. Once we hit the rwnd_req, we then
5544 			 * will go to the sctp_user_rcvd() that will not
5545 			 * lock until it KNOWs it MUST send a WUP-SACK.
5546 			 */
5547 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5548 			stcb->freed_by_sorcv_sincelast = 0;
5549 		}
5550 	}
5551 	if (stcb &&
5552 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5553 	    control->do_not_ref_stcb == 0) {
5554 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5555 	}
5556 	/* First lets get off the sinfo and sockaddr info */
5557 	if ((sinfo) && filling_sinfo) {
5558 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5559 		nxt = TAILQ_NEXT(control, next);
5560 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5561 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5562 			struct sctp_extrcvinfo *s_extra;
5563 
5564 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5565 			if ((nxt) &&
5566 			    (nxt->length)) {
5567 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5568 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5569 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5570 				}
5571 				if (nxt->spec_flags & M_NOTIFICATION) {
5572 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5573 				}
5574 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5575 				s_extra->sreinfo_next_length = nxt->length;
5576 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5577 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5578 				if (nxt->tail_mbuf != NULL) {
5579 					if (nxt->end_added) {
5580 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5581 					}
5582 				}
5583 			} else {
5584 				/*
5585 				 * we explicitly 0 this, since the memcpy
5586 				 * got some other things beyond the older
5587 				 * sinfo_ that is on the control's structure
5588 				 * :-D
5589 				 */
5590 				nxt = NULL;
5591 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5592 				s_extra->sreinfo_next_aid = 0;
5593 				s_extra->sreinfo_next_length = 0;
5594 				s_extra->sreinfo_next_ppid = 0;
5595 				s_extra->sreinfo_next_stream = 0;
5596 			}
5597 		}
5598 		/*
5599 		 * update off the real current cum-ack, if we have an stcb.
5600 		 */
5601 		if ((control->do_not_ref_stcb == 0) && stcb)
5602 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5603 		/*
5604 		 * mask off the high bits, we keep the actual chunk bits in
5605 		 * there.
5606 		 */
5607 		sinfo->sinfo_flags &= 0x00ff;
5608 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5609 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5610 		}
5611 	}
5612 #ifdef SCTP_ASOCLOG_OF_TSNS
5613 	{
5614 		int index, newindex;
5615 		struct sctp_pcbtsn_rlog *entry;
5616 
5617 		do {
5618 			index = inp->readlog_index;
5619 			newindex = index + 1;
5620 			if (newindex >= SCTP_READ_LOG_SIZE) {
5621 				newindex = 0;
5622 			}
5623 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5624 		entry = &inp->readlog[index];
5625 		entry->vtag = control->sinfo_assoc_id;
5626 		entry->strm = control->sinfo_stream;
5627 		entry->seq = control->sinfo_ssn;
5628 		entry->sz = control->length;
5629 		entry->flgs = control->sinfo_flags;
5630 	}
5631 #endif
5632 	if (fromlen && from) {
5633 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len);
5634 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5635 #ifdef INET6
5636 		case AF_INET6:
5637 			((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5638 			break;
5639 #endif
5640 #ifdef INET
5641 		case AF_INET:
5642 			((struct sockaddr_in *)from)->sin_port = control->port_from;
5643 			break;
5644 #endif
5645 		default:
5646 			break;
5647 		}
5648 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5649 
5650 #if defined(INET) && defined(INET6)
5651 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5652 		    (from->sa_family == AF_INET) &&
5653 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5654 			struct sockaddr_in *sin;
5655 			struct sockaddr_in6 sin6;
5656 
5657 			sin = (struct sockaddr_in *)from;
5658 			bzero(&sin6, sizeof(sin6));
5659 			sin6.sin6_family = AF_INET6;
5660 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5661 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5662 			bcopy(&sin->sin_addr,
5663 			    &sin6.sin6_addr.s6_addr32[3],
5664 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5665 			sin6.sin6_port = sin->sin_port;
5666 			memcpy(from, &sin6, sizeof(struct sockaddr_in6));
5667 		}
5668 #endif
5669 #ifdef INET6
5670 		{
5671 			struct sockaddr_in6 lsa6, *from6;
5672 
5673 			from6 = (struct sockaddr_in6 *)from;
5674 			sctp_recover_scope_mac(from6, (&lsa6));
5675 		}
5676 #endif
5677 	}
5678 	/* now copy out what data we can */
5679 	if (mp == NULL) {
5680 		/* copy out each mbuf in the chain up to length */
5681 get_more_data:
5682 		m = control->data;
5683 		while (m) {
5684 			/* Move out all we can */
5685 			cp_len = (int)uio->uio_resid;
5686 			my_len = (int)SCTP_BUF_LEN(m);
5687 			if (cp_len > my_len) {
5688 				/* not enough in this buf */
5689 				cp_len = my_len;
5690 			}
5691 			if (hold_rlock) {
5692 				SCTP_INP_READ_UNLOCK(inp);
5693 				hold_rlock = 0;
5694 			}
5695 			if (cp_len > 0)
5696 				error = uiomove(mtod(m, char *), cp_len, uio);
5697 			/* re-read */
5698 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5699 				goto release;
5700 			}
5701 			if ((control->do_not_ref_stcb == 0) && stcb &&
5702 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5703 				no_rcv_needed = 1;
5704 			}
5705 			if (error) {
5706 				/* error we are out of here */
5707 				goto release;
5708 			}
5709 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5710 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5711 			    ((control->end_added == 0) ||
5712 			    (control->end_added &&
5713 			    (TAILQ_NEXT(control, next) == NULL)))
5714 			    ) {
5715 				SCTP_INP_READ_LOCK(inp);
5716 				hold_rlock = 1;
5717 			}
5718 			if (cp_len == SCTP_BUF_LEN(m)) {
5719 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5720 				    (control->end_added)) {
5721 					out_flags |= MSG_EOR;
5722 					if ((control->do_not_ref_stcb == 0) &&
5723 					    (control->stcb != NULL) &&
5724 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5725 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5726 				}
5727 				if (control->spec_flags & M_NOTIFICATION) {
5728 					out_flags |= MSG_NOTIFICATION;
5729 				}
5730 				/* we ate up the mbuf */
5731 				if (in_flags & MSG_PEEK) {
5732 					/* just looking */
5733 					m = SCTP_BUF_NEXT(m);
5734 					copied_so_far += cp_len;
5735 				} else {
5736 					/* dispose of the mbuf */
5737 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5738 						sctp_sblog(&so->so_rcv,
5739 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5740 					}
5741 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5742 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5743 						sctp_sblog(&so->so_rcv,
5744 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5745 					}
5746 					copied_so_far += cp_len;
5747 					freed_so_far += cp_len;
5748 					freed_so_far += MSIZE;
5749 					atomic_subtract_int(&control->length, cp_len);
5750 					control->data = sctp_m_free(m);
5751 					m = control->data;
5752 					/*
5753 					 * been through it all, must hold sb
5754 					 * lock ok to null tail
5755 					 */
5756 					if (control->data == NULL) {
5757 #ifdef INVARIANTS
5758 						if ((control->end_added == 0) ||
5759 						    (TAILQ_NEXT(control, next) == NULL)) {
5760 							/*
5761 							 * If the end is not
5762 							 * added, OR the
5763 							 * next is NOT null
5764 							 * we MUST have the
5765 							 * lock.
5766 							 */
5767 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5768 								panic("Hmm we don't own the lock?");
5769 							}
5770 						}
5771 #endif
5772 						control->tail_mbuf = NULL;
5773 #ifdef INVARIANTS
5774 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5775 							panic("end_added, nothing left and no MSG_EOR");
5776 						}
5777 #endif
5778 					}
5779 				}
5780 			} else {
5781 				/* Do we need to trim the mbuf? */
5782 				if (control->spec_flags & M_NOTIFICATION) {
5783 					out_flags |= MSG_NOTIFICATION;
5784 				}
5785 				if ((in_flags & MSG_PEEK) == 0) {
5786 					SCTP_BUF_RESV_UF(m, cp_len);
5787 					SCTP_BUF_LEN(m) -= cp_len;
5788 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5789 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5790 					}
5791 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5792 					if ((control->do_not_ref_stcb == 0) &&
5793 					    stcb) {
5794 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5795 					}
5796 					copied_so_far += cp_len;
5797 					freed_so_far += cp_len;
5798 					freed_so_far += MSIZE;
5799 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5800 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5801 						    SCTP_LOG_SBRESULT, 0);
5802 					}
5803 					atomic_subtract_int(&control->length, cp_len);
5804 				} else {
5805 					copied_so_far += cp_len;
5806 				}
5807 			}
5808 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5809 				break;
5810 			}
5811 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5812 			    (control->do_not_ref_stcb == 0) &&
5813 			    (freed_so_far >= rwnd_req)) {
5814 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5815 			}
5816 		}		/* end while(m) */
5817 		/*
5818 		 * At this point we have looked at it all and we either have
5819 		 * a MSG_EOR/or read all the user wants... <OR>
5820 		 * control->length == 0.
5821 		 */
5822 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5823 			/* we are done with this control */
5824 			if (control->length == 0) {
5825 				if (control->data) {
5826 #ifdef INVARIANTS
5827 					panic("control->data not null at read eor?");
5828 #else
5829 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5830 					sctp_m_freem(control->data);
5831 					control->data = NULL;
5832 #endif
5833 				}
5834 		done_with_control:
5835 				if (TAILQ_NEXT(control, next) == NULL) {
5836 					/*
5837 					 * If we don't have a next we need a
5838 					 * lock, if there is a next
5839 					 * interrupt is filling ahead of us
5840 					 * and we don't need a lock to
5841 					 * remove this guy (which is the
5842 					 * head of the queue).
5843 					 */
5844 					if (hold_rlock == 0) {
5845 						SCTP_INP_READ_LOCK(inp);
5846 						hold_rlock = 1;
5847 					}
5848 				}
5849 				TAILQ_REMOVE(&inp->read_queue, control, next);
5850 				/* Add back any hiddend data */
5851 				if (control->held_length) {
5852 					held_length = 0;
5853 					control->held_length = 0;
5854 					wakeup_read_socket = 1;
5855 				}
5856 				if (control->aux_data) {
5857 					sctp_m_free(control->aux_data);
5858 					control->aux_data = NULL;
5859 				}
5860 				no_rcv_needed = control->do_not_ref_stcb;
5861 				sctp_free_remote_addr(control->whoFrom);
5862 				control->data = NULL;
5863 				sctp_free_a_readq(stcb, control);
5864 				control = NULL;
5865 				if ((freed_so_far >= rwnd_req) &&
5866 				    (no_rcv_needed == 0))
5867 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5868 
5869 			} else {
5870 				/*
5871 				 * The user did not read all of this
5872 				 * message, turn off the returned MSG_EOR
5873 				 * since we are leaving more behind on the
5874 				 * control to read.
5875 				 */
5876 #ifdef INVARIANTS
5877 				if (control->end_added &&
5878 				    (control->data == NULL) &&
5879 				    (control->tail_mbuf == NULL)) {
5880 					panic("Gak, control->length is corrupt?");
5881 				}
5882 #endif
5883 				no_rcv_needed = control->do_not_ref_stcb;
5884 				out_flags &= ~MSG_EOR;
5885 			}
5886 		}
5887 		if (out_flags & MSG_EOR) {
5888 			goto release;
5889 		}
5890 		if ((uio->uio_resid == 0) ||
5891 		    ((in_eeor_mode) &&
5892 		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5893 			goto release;
5894 		}
5895 		/*
5896 		 * If I hit here the receiver wants more and this message is
5897 		 * NOT done (pd-api). So two questions. Can we block? if not
5898 		 * we are done. Did the user NOT set MSG_WAITALL?
5899 		 */
5900 		if (block_allowed == 0) {
5901 			goto release;
5902 		}
5903 		/*
5904 		 * We need to wait for more data a few things: - We don't
5905 		 * sbunlock() so we don't get someone else reading. - We
5906 		 * must be sure to account for the case where what is added
5907 		 * is NOT to our control when we wakeup.
5908 		 */
5909 
5910 		/*
5911 		 * Do we need to tell the transport a rwnd update might be
5912 		 * needed before we go to sleep?
5913 		 */
5914 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5915 		    ((freed_so_far >= rwnd_req) &&
5916 		    (control->do_not_ref_stcb == 0) &&
5917 		    (no_rcv_needed == 0))) {
5918 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5919 		}
5920 wait_some_more:
5921 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5922 			goto release;
5923 		}
5924 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5925 			goto release;
5926 
5927 		if (hold_rlock == 1) {
5928 			SCTP_INP_READ_UNLOCK(inp);
5929 			hold_rlock = 0;
5930 		}
5931 		if (hold_sblock == 0) {
5932 			SOCKBUF_LOCK(&so->so_rcv);
5933 			hold_sblock = 1;
5934 		}
5935 		if ((copied_so_far) && (control->length == 0) &&
5936 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5937 			goto release;
5938 		}
5939 		if (so->so_rcv.sb_cc <= control->held_length) {
5940 			error = sbwait(&so->so_rcv);
5941 			if (error) {
5942 				goto release;
5943 			}
5944 			control->held_length = 0;
5945 		}
5946 		if (hold_sblock) {
5947 			SOCKBUF_UNLOCK(&so->so_rcv);
5948 			hold_sblock = 0;
5949 		}
5950 		if (control->length == 0) {
5951 			/* still nothing here */
5952 			if (control->end_added == 1) {
5953 				/* he aborted, or is done i.e.did a shutdown */
5954 				out_flags |= MSG_EOR;
5955 				if (control->pdapi_aborted) {
5956 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5957 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5958 
5959 					out_flags |= MSG_TRUNC;
5960 				} else {
5961 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5962 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5963 				}
5964 				goto done_with_control;
5965 			}
5966 			if (so->so_rcv.sb_cc > held_length) {
5967 				control->held_length = so->so_rcv.sb_cc;
5968 				held_length = 0;
5969 			}
5970 			goto wait_some_more;
5971 		} else if (control->data == NULL) {
5972 			/*
5973 			 * we must re-sync since data is probably being
5974 			 * added
5975 			 */
5976 			SCTP_INP_READ_LOCK(inp);
5977 			if ((control->length > 0) && (control->data == NULL)) {
5978 				/*
5979 				 * big trouble.. we have the lock and its
5980 				 * corrupt?
5981 				 */
5982 #ifdef INVARIANTS
5983 				panic("Impossible data==NULL length !=0");
5984 #endif
5985 				out_flags |= MSG_EOR;
5986 				out_flags |= MSG_TRUNC;
5987 				control->length = 0;
5988 				SCTP_INP_READ_UNLOCK(inp);
5989 				goto done_with_control;
5990 			}
5991 			SCTP_INP_READ_UNLOCK(inp);
5992 			/* We will fall around to get more data */
5993 		}
5994 		goto get_more_data;
5995 	} else {
5996 		/*-
5997 		 * Give caller back the mbuf chain,
5998 		 * store in uio_resid the length
5999 		 */
6000 		wakeup_read_socket = 0;
6001 		if ((control->end_added == 0) ||
6002 		    (TAILQ_NEXT(control, next) == NULL)) {
6003 			/* Need to get rlock */
6004 			if (hold_rlock == 0) {
6005 				SCTP_INP_READ_LOCK(inp);
6006 				hold_rlock = 1;
6007 			}
6008 		}
6009 		if (control->end_added) {
6010 			out_flags |= MSG_EOR;
6011 			if ((control->do_not_ref_stcb == 0) &&
6012 			    (control->stcb != NULL) &&
6013 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6014 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6015 		}
6016 		if (control->spec_flags & M_NOTIFICATION) {
6017 			out_flags |= MSG_NOTIFICATION;
6018 		}
6019 		uio->uio_resid = control->length;
6020 		*mp = control->data;
6021 		m = control->data;
6022 		while (m) {
6023 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6024 				sctp_sblog(&so->so_rcv,
6025 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6026 			}
6027 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6028 			freed_so_far += SCTP_BUF_LEN(m);
6029 			freed_so_far += MSIZE;
6030 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6031 				sctp_sblog(&so->so_rcv,
6032 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6033 			}
6034 			m = SCTP_BUF_NEXT(m);
6035 		}
6036 		control->data = control->tail_mbuf = NULL;
6037 		control->length = 0;
6038 		if (out_flags & MSG_EOR) {
6039 			/* Done with this control */
6040 			goto done_with_control;
6041 		}
6042 	}
6043 release:
6044 	if (hold_rlock == 1) {
6045 		SCTP_INP_READ_UNLOCK(inp);
6046 		hold_rlock = 0;
6047 	}
6048 	if (hold_sblock == 1) {
6049 		SOCKBUF_UNLOCK(&so->so_rcv);
6050 		hold_sblock = 0;
6051 	}
6052 	sbunlock(&so->so_rcv);
6053 	sockbuf_lock = 0;
6054 
6055 release_unlocked:
6056 	if (hold_sblock) {
6057 		SOCKBUF_UNLOCK(&so->so_rcv);
6058 		hold_sblock = 0;
6059 	}
6060 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6061 		if ((freed_so_far >= rwnd_req) &&
6062 		    (control && (control->do_not_ref_stcb == 0)) &&
6063 		    (no_rcv_needed == 0))
6064 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6065 	}
6066 out:
6067 	if (msg_flags) {
6068 		*msg_flags = out_flags;
6069 	}
6070 	if (((out_flags & MSG_EOR) == 0) &&
6071 	    ((in_flags & MSG_PEEK) == 0) &&
6072 	    (sinfo) &&
6073 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6074 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6075 		struct sctp_extrcvinfo *s_extra;
6076 
6077 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6078 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6079 	}
6080 	if (hold_rlock == 1) {
6081 		SCTP_INP_READ_UNLOCK(inp);
6082 	}
6083 	if (hold_sblock) {
6084 		SOCKBUF_UNLOCK(&so->so_rcv);
6085 	}
6086 	if (sockbuf_lock) {
6087 		sbunlock(&so->so_rcv);
6088 	}
6089 	if (freecnt_applied) {
6090 		/*
6091 		 * The lock on the socket buffer protects us so the free
6092 		 * code will stop. But since we used the socketbuf lock and
6093 		 * the sender uses the tcb_lock to increment, we need to use
6094 		 * the atomic add to the refcnt.
6095 		 */
6096 		if (stcb == NULL) {
6097 #ifdef INVARIANTS
6098 			panic("stcb for refcnt has gone NULL?");
6099 			goto stage_left;
6100 #else
6101 			goto stage_left;
6102 #endif
6103 		}
6104 		atomic_add_int(&stcb->asoc.refcnt, -1);
6105 		/* Save the value back for next time */
6106 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6107 	}
6108 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6109 		if (stcb) {
6110 			sctp_misc_ints(SCTP_SORECV_DONE,
6111 			    freed_so_far,
6112 			    ((uio) ? (slen - uio->uio_resid) : slen),
6113 			    stcb->asoc.my_rwnd,
6114 			    so->so_rcv.sb_cc);
6115 		} else {
6116 			sctp_misc_ints(SCTP_SORECV_DONE,
6117 			    freed_so_far,
6118 			    ((uio) ? (slen - uio->uio_resid) : slen),
6119 			    0,
6120 			    so->so_rcv.sb_cc);
6121 		}
6122 	}
6123 stage_left:
6124 	if (wakeup_read_socket) {
6125 		sctp_sorwakeup(inp, so);
6126 	}
6127 	return (error);
6128 }
6129 
6130 
6131 #ifdef SCTP_MBUF_LOGGING
6132 struct mbuf *
6133 sctp_m_free(struct mbuf *m)
6134 {
6135 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6136 		if (SCTP_BUF_IS_EXTENDED(m)) {
6137 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6138 		}
6139 	}
6140 	return (m_free(m));
6141 }
6142 
6143 void
6144 sctp_m_freem(struct mbuf *mb)
6145 {
6146 	while (mb != NULL)
6147 		mb = sctp_m_free(mb);
6148 }
6149 
6150 #endif
6151 
6152 int
6153 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6154 {
6155 	/*
6156 	 * Given a local address. For all associations that holds the
6157 	 * address, request a peer-set-primary.
6158 	 */
6159 	struct sctp_ifa *ifa;
6160 	struct sctp_laddr *wi;
6161 
6162 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6163 	if (ifa == NULL) {
6164 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6165 		return (EADDRNOTAVAIL);
6166 	}
6167 	/*
6168 	 * Now that we have the ifa we must awaken the iterator with this
6169 	 * message.
6170 	 */
6171 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6172 	if (wi == NULL) {
6173 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6174 		return (ENOMEM);
6175 	}
6176 	/* Now incr the count and int wi structure */
6177 	SCTP_INCR_LADDR_COUNT();
6178 	bzero(wi, sizeof(*wi));
6179 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6180 	wi->ifa = ifa;
6181 	wi->action = SCTP_SET_PRIM_ADDR;
6182 	atomic_add_int(&ifa->refcount, 1);
6183 
6184 	/* Now add it to the work queue */
6185 	SCTP_WQ_ADDR_LOCK();
6186 	/*
6187 	 * Should this really be a tailq? As it is we will process the
6188 	 * newest first :-0
6189 	 */
6190 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6191 	SCTP_WQ_ADDR_UNLOCK();
6192 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6193 	    (struct sctp_inpcb *)NULL,
6194 	    (struct sctp_tcb *)NULL,
6195 	    (struct sctp_nets *)NULL);
6196 	return (0);
6197 }
6198 
6199 
6200 int
6201 sctp_soreceive(struct socket *so,
6202     struct sockaddr **psa,
6203     struct uio *uio,
6204     struct mbuf **mp0,
6205     struct mbuf **controlp,
6206     int *flagsp)
6207 {
6208 	int error, fromlen;
6209 	uint8_t sockbuf[256];
6210 	struct sockaddr *from;
6211 	struct sctp_extrcvinfo sinfo;
6212 	int filling_sinfo = 1;
6213 	struct sctp_inpcb *inp;
6214 
6215 	inp = (struct sctp_inpcb *)so->so_pcb;
6216 	/* pickup the assoc we are reading from */
6217 	if (inp == NULL) {
6218 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6219 		return (EINVAL);
6220 	}
6221 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6222 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6223 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6224 	    (controlp == NULL)) {
6225 		/* user does not want the sndrcv ctl */
6226 		filling_sinfo = 0;
6227 	}
6228 	if (psa) {
6229 		from = (struct sockaddr *)sockbuf;
6230 		fromlen = sizeof(sockbuf);
6231 		from->sa_len = 0;
6232 	} else {
6233 		from = NULL;
6234 		fromlen = 0;
6235 	}
6236 
6237 	if (filling_sinfo) {
6238 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6239 	}
6240 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6241 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6242 	if (controlp != NULL) {
6243 		/* copy back the sinfo in a CMSG format */
6244 		if (filling_sinfo)
6245 			*controlp = sctp_build_ctl_nchunk(inp,
6246 			    (struct sctp_sndrcvinfo *)&sinfo);
6247 		else
6248 			*controlp = NULL;
6249 	}
6250 	if (psa) {
6251 		/* copy back the address info */
6252 		if (from && from->sa_len) {
6253 			*psa = sodupsockaddr(from, M_NOWAIT);
6254 		} else {
6255 			*psa = NULL;
6256 		}
6257 	}
6258 	return (error);
6259 }
6260 
6261 
6262 
6263 
6264 
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	/*
	 * Add the totaddr addresses packed consecutively at addr to the
	 * association as confirmed remote addresses.  Returns the number of
	 * addresses actually added.  On any failure the association has been
	 * freed via sctp_free_assoc() and *error is set -- the caller must
	 * not touch stcb in that case.
	 */
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject wildcard, broadcast and multicast destinations. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast destinations. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): for an unknown family incr keeps its
			 * previous value (0 on the first iteration), so sa is
			 * not advanced meaningfully; callers are expected to
			 * have validated the families via
			 * sctp_connectx_helper_find() beforehand -- confirm.
			 */
			break;
		}
		/* step to the next packed sockaddr */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6340 
6341 struct sctp_tcb *
6342 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6343     int *totaddr, int *num_v4, int *num_v6, int *error,
6344     int limit, int *bad_addr)
6345 {
6346 	struct sockaddr *sa;
6347 	struct sctp_tcb *stcb = NULL;
6348 	size_t incr, at, i;
6349 
6350 	at = incr = 0;
6351 	sa = addr;
6352 
6353 	*error = *num_v6 = *num_v4 = 0;
6354 	/* account and validate addresses */
6355 	for (i = 0; i < (size_t)*totaddr; i++) {
6356 		switch (sa->sa_family) {
6357 #ifdef INET
6358 		case AF_INET:
6359 			(*num_v4) += 1;
6360 			incr = sizeof(struct sockaddr_in);
6361 			if (sa->sa_len != incr) {
6362 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6363 				*error = EINVAL;
6364 				*bad_addr = 1;
6365 				return (NULL);
6366 			}
6367 			break;
6368 #endif
6369 #ifdef INET6
6370 		case AF_INET6:
6371 			{
6372 				struct sockaddr_in6 *sin6;
6373 
6374 				sin6 = (struct sockaddr_in6 *)sa;
6375 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6376 					/* Must be non-mapped for connectx */
6377 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6378 					*error = EINVAL;
6379 					*bad_addr = 1;
6380 					return (NULL);
6381 				}
6382 				(*num_v6) += 1;
6383 				incr = sizeof(struct sockaddr_in6);
6384 				if (sa->sa_len != incr) {
6385 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6386 					*error = EINVAL;
6387 					*bad_addr = 1;
6388 					return (NULL);
6389 				}
6390 				break;
6391 			}
6392 #endif
6393 		default:
6394 			*totaddr = i;
6395 			/* we are done */
6396 			break;
6397 		}
6398 		if (i == (size_t)*totaddr) {
6399 			break;
6400 		}
6401 		SCTP_INP_INCR_REF(inp);
6402 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6403 		if (stcb != NULL) {
6404 			/* Already have or am bring up an association */
6405 			return (stcb);
6406 		} else {
6407 			SCTP_INP_DECR_REF(inp);
6408 		}
6409 		if ((at + incr) > (size_t)limit) {
6410 			*totaddr = i;
6411 			break;
6412 		}
6413 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6414 	}
6415 	return ((struct sctp_tcb *)NULL);
6416 }
6417 
6418 /*
6419  * sctp_bindx(ADD) for one address.
6420  * assumes all arguments are valid/checked by caller.
6421  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/*
	 * sctp_bindx(ADD) for one address: either perform the initial bind
	 * (when the endpoint is still unbound) or add the address to an
	 * already subset-bound endpoint.  Failures are reported through
	 * *error; arguments are assumed validated by the caller.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* convert the v4-mapped address to plain v4 for binding */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* Endpoint not yet bound: this ADD becomes the initial bind. */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* is some other endpoint already bound to this address/port? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* nobody owns it: add the address to this endpoint */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6545 
6546 /*
6547  * sctp_bindx(DELETE) for one address.
6548  * assumes all arguments are valid/checked by caller.
6549  */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/*
	 * sctp_bindx(DELETE) for one address: remove the address from a
	 * subset-bound endpoint.  Failures are reported through *error;
	 * arguments are assumed validated by the caller.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* convert the v4-mapped address to plain v4 for the delete */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6630 
6631 /*
6632  * returns the valid local address count for an assoc, taking into account
6633  * all scoping rules
6634  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Return the number of local addresses that are valid for this
	 * association after applying all scoping rules (loopback, private
	 * v4, link/site-local v6, jail visibility) and the endpoint's
	 * bound-address set.
	 */
	int loopback_scope;

#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;

#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;

#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* skip addresses not visible in our jail */
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						/* skip addresses not visible in our jail */
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link-local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case: only the explicitly bound addresses
		 * count, minus any that are restricted for this assoc.
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6780 
6781 #if defined(SCTP_LOCAL_TRACE_BUF)
6782 
6783 void
6784 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6785 {
6786 	uint32_t saveindex, newindex;
6787 
6788 	do {
6789 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6790 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6791 			newindex = 1;
6792 		} else {
6793 			newindex = saveindex + 1;
6794 		}
6795 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6796 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6797 		saveindex = 0;
6798 	}
6799 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6800 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6801 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6802 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6803 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6804 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6805 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6806 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6807 }
6808 
6809 #endif
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
{
	/*
	 * Receive hook for the kernel UDP tunneling socket: strip the UDP
	 * encapsulation header out of the packet and feed the remaining
	 * IP + SCTP packet into the normal SCTP input path, carrying along
	 * the encapsulating UDP source port.  Consumes m on every path.
	 */
	struct ip *iph;

#ifdef INET6
	struct ip6_hdr *ip6;

#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* shrink the advertised IP length by the stripped UDP header */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		/* shrink the advertised payload length likewise */
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		/* not a recognized IP version - drop */
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6881 
void
sctp_over_udp_stop(void)
{
	/*
	 * Close any open SCTP-over-UDP tunneling sockets.  This function
	 * assumes the sysctl caller holds sctp_sysctl_info_lock() for
	 * writing, so the socket pointers cannot change under us.
	 */
#if defined(INET) || defined(INET6)
	struct socket *tun;

#endif
#ifdef INET
	tun = SCTP_BASE_INFO(udp4_tun_socket);
	if (tun != NULL) {
		soclose(tun);
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	tun = SCTP_BASE_INFO(udp6_tun_socket);
	if (tun != NULL) {
		soclose(tun);
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
6902 
6903 int
6904 sctp_over_udp_start(void)
6905 {
6906 	uint16_t port;
6907 	int ret;
6908 
6909 #ifdef INET
6910 	struct sockaddr_in sin;
6911 
6912 #endif
6913 #ifdef INET6
6914 	struct sockaddr_in6 sin6;
6915 
6916 #endif
6917 	/*
6918 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6919 	 * for writting!
6920 	 */
6921 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
6922 	if (ntohs(port) == 0) {
6923 		/* Must have a port set */
6924 		return (EINVAL);
6925 	}
6926 #ifdef INET
6927 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
6928 		/* Already running -- must stop first */
6929 		return (EALREADY);
6930 	}
6931 #endif
6932 #ifdef INET6
6933 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
6934 		/* Already running -- must stop first */
6935 		return (EALREADY);
6936 	}
6937 #endif
6938 #ifdef INET
6939 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
6940 	    SOCK_DGRAM, IPPROTO_UDP,
6941 	    curthread->td_ucred, curthread))) {
6942 		sctp_over_udp_stop();
6943 		return (ret);
6944 	}
6945 	/* Call the special UDP hook. */
6946 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
6947 	    sctp_recv_udp_tunneled_packet))) {
6948 		sctp_over_udp_stop();
6949 		return (ret);
6950 	}
6951 	/* Ok, we have a socket, bind it to the port. */
6952 	memset(&sin, 0, sizeof(struct sockaddr_in));
6953 	sin.sin_len = sizeof(struct sockaddr_in);
6954 	sin.sin_family = AF_INET;
6955 	sin.sin_port = htons(port);
6956 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
6957 	    (struct sockaddr *)&sin, curthread))) {
6958 		sctp_over_udp_stop();
6959 		return (ret);
6960 	}
6961 #endif
6962 #ifdef INET6
6963 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
6964 	    SOCK_DGRAM, IPPROTO_UDP,
6965 	    curthread->td_ucred, curthread))) {
6966 		sctp_over_udp_stop();
6967 		return (ret);
6968 	}
6969 	/* Call the special UDP hook. */
6970 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
6971 	    sctp_recv_udp_tunneled_packet))) {
6972 		sctp_over_udp_stop();
6973 		return (ret);
6974 	}
6975 	/* Ok, we have a socket, bind it to the port. */
6976 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
6977 	sin6.sin6_len = sizeof(struct sockaddr_in6);
6978 	sin6.sin6_family = AF_INET6;
6979 	sin6.sin6_port = htons(port);
6980 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
6981 	    (struct sockaddr *)&sin6, curthread))) {
6982 		sctp_over_udp_stop();
6983 		return (ret);
6984 	}
6985 #endif
6986 	return (0);
6987 }
6988