xref: /freebsd/sys/netinet/sctputil.c (revision 4f0a4502a1f33fef287ac558c98e5ef99a32216f)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55 
56 
57 #ifndef KTR_SCTP
58 #define KTR_SCTP KTR_SUBSYS
59 #endif
60 
61 extern struct sctp_cc_functions sctp_cc_functions[];
62 extern struct sctp_ss_functions sctp_ss_functions[];
63 
64 void
65 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
66 {
67 	struct sctp_cwnd_log sctp_clog;
68 
69 	sctp_clog.x.sb.stcb = stcb;
70 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
71 	if (stcb)
72 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
73 	else
74 		sctp_clog.x.sb.stcb_sbcc = 0;
75 	sctp_clog.x.sb.incr = incr;
76 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
77 	    SCTP_LOG_EVENT_SB,
78 	    from,
79 	    sctp_clog.x.misc.log1,
80 	    sctp_clog.x.misc.log2,
81 	    sctp_clog.x.misc.log3,
82 	    sctp_clog.x.misc.log4);
83 }
84 
85 void
86 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
87 {
88 	struct sctp_cwnd_log sctp_clog;
89 
90 	sctp_clog.x.close.inp = (void *)inp;
91 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
92 	if (stcb) {
93 		sctp_clog.x.close.stcb = (void *)stcb;
94 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
95 	} else {
96 		sctp_clog.x.close.stcb = 0;
97 		sctp_clog.x.close.state = 0;
98 	}
99 	sctp_clog.x.close.loc = loc;
100 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
101 	    SCTP_LOG_EVENT_CLOSE,
102 	    0,
103 	    sctp_clog.x.misc.log1,
104 	    sctp_clog.x.misc.log2,
105 	    sctp_clog.x.misc.log3,
106 	    sctp_clog.x.misc.log4);
107 }
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 }
125 
126 void
127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128 {
129 	struct sctp_cwnd_log sctp_clog;
130 
131 	sctp_clog.x.strlog.stcb = stcb;
132 	sctp_clog.x.strlog.n_tsn = tsn;
133 	sctp_clog.x.strlog.n_sseq = sseq;
134 	sctp_clog.x.strlog.e_tsn = 0;
135 	sctp_clog.x.strlog.e_sseq = 0;
136 	sctp_clog.x.strlog.strm = stream;
137 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138 	    SCTP_LOG_EVENT_STRM,
139 	    from,
140 	    sctp_clog.x.misc.log1,
141 	    sctp_clog.x.misc.log2,
142 	    sctp_clog.x.misc.log3,
143 	    sctp_clog.x.misc.log4);
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 void
166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167 {
168 	struct sctp_cwnd_log sctp_clog;
169 
170 	sctp_clog.x.sack.cumack = cumack;
171 	sctp_clog.x.sack.oldcumack = old_cumack;
172 	sctp_clog.x.sack.tsn = tsn;
173 	sctp_clog.x.sack.numGaps = gaps;
174 	sctp_clog.x.sack.numDups = dups;
175 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176 	    SCTP_LOG_EVENT_SACK,
177 	    from,
178 	    sctp_clog.x.misc.log1,
179 	    sctp_clog.x.misc.log2,
180 	    sctp_clog.x.misc.log3,
181 	    sctp_clog.x.misc.log4);
182 }
183 
184 void
185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186 {
187 	struct sctp_cwnd_log sctp_clog;
188 
189 	memset(&sctp_clog, 0, sizeof(sctp_clog));
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
204 {
205 	struct sctp_cwnd_log sctp_clog;
206 
207 	memset(&sctp_clog, 0, sizeof(sctp_clog));
208 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210 	sctp_clog.x.fr.tsn = tsn;
211 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212 	    SCTP_LOG_EVENT_FR,
213 	    from,
214 	    sctp_clog.x.misc.log1,
215 	    sctp_clog.x.misc.log2,
216 	    sctp_clog.x.misc.log3,
217 	    sctp_clog.x.misc.log4);
218 }
219 
220 #ifdef SCTP_MBUF_LOGGING
221 void
222 sctp_log_mb(struct mbuf *m, int from)
223 {
224 	struct sctp_cwnd_log sctp_clog;
225 
226 	sctp_clog.x.mb.mp = m;
227 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
228 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
229 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
230 	if (SCTP_BUF_IS_EXTENDED(m)) {
231 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
232 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
233 	} else {
234 		sctp_clog.x.mb.ext = 0;
235 		sctp_clog.x.mb.refcnt = 0;
236 	}
237 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
238 	    SCTP_LOG_EVENT_MBUF,
239 	    from,
240 	    sctp_clog.x.misc.log1,
241 	    sctp_clog.x.misc.log2,
242 	    sctp_clog.x.misc.log3,
243 	    sctp_clog.x.misc.log4);
244 }
245 
246 void
247 sctp_log_mbc(struct mbuf *m, int from)
248 {
249 	struct mbuf *mat;
250 
251 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
252 		sctp_log_mb(mat, from);
253 	}
254 }
255 
256 #endif
257 
258 void
259 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
260 {
261 	struct sctp_cwnd_log sctp_clog;
262 
263 	if (control == NULL) {
264 		SCTP_PRINTF("Gak log of NULL?\n");
265 		return;
266 	}
267 	sctp_clog.x.strlog.stcb = control->stcb;
268 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
269 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
270 	sctp_clog.x.strlog.strm = control->sinfo_stream;
271 	if (poschk != NULL) {
272 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
273 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
274 	} else {
275 		sctp_clog.x.strlog.e_tsn = 0;
276 		sctp_clog.x.strlog.e_sseq = 0;
277 	}
278 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
279 	    SCTP_LOG_EVENT_STRM,
280 	    from,
281 	    sctp_clog.x.misc.log1,
282 	    sctp_clog.x.misc.log2,
283 	    sctp_clog.x.misc.log3,
284 	    sctp_clog.x.misc.log4);
285 }
286 
287 void
288 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
289 {
290 	struct sctp_cwnd_log sctp_clog;
291 
292 	sctp_clog.x.cwnd.net = net;
293 	if (stcb->asoc.send_queue_cnt > 255)
294 		sctp_clog.x.cwnd.cnt_in_send = 255;
295 	else
296 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
297 	if (stcb->asoc.stream_queue_cnt > 255)
298 		sctp_clog.x.cwnd.cnt_in_str = 255;
299 	else
300 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
301 
302 	if (net) {
303 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
304 		sctp_clog.x.cwnd.inflight = net->flight_size;
305 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
306 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
307 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
308 	}
309 	if (SCTP_CWNDLOG_PRESEND == from) {
310 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
311 	}
312 	sctp_clog.x.cwnd.cwnd_augment = augment;
313 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
314 	    SCTP_LOG_EVENT_CWND,
315 	    from,
316 	    sctp_clog.x.misc.log1,
317 	    sctp_clog.x.misc.log2,
318 	    sctp_clog.x.misc.log3,
319 	    sctp_clog.x.misc.log4);
320 }
321 
/*
 * Snapshot the ownership state of the principal SCTP locks (TCB, inp,
 * create, global endpoint-info and socket-buffer locks) for lock
 * debugging; 'from' identifies the call site.  Either inp or stcb may be
 * NULL, in which case the corresponding fields log SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx; on FreeBSD SOCK_LOCK aliases the receive
		 * buffer mutex, so this looks intentional -- confirm.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
365 
366 void
367 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
368 {
369 	struct sctp_cwnd_log sctp_clog;
370 
371 	memset(&sctp_clog, 0, sizeof(sctp_clog));
372 	sctp_clog.x.cwnd.net = net;
373 	sctp_clog.x.cwnd.cwnd_new_value = error;
374 	sctp_clog.x.cwnd.inflight = net->flight_size;
375 	sctp_clog.x.cwnd.cwnd_augment = burst;
376 	if (stcb->asoc.send_queue_cnt > 255)
377 		sctp_clog.x.cwnd.cnt_in_send = 255;
378 	else
379 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
380 	if (stcb->asoc.stream_queue_cnt > 255)
381 		sctp_clog.x.cwnd.cnt_in_str = 255;
382 	else
383 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
384 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
385 	    SCTP_LOG_EVENT_MAXBURST,
386 	    from,
387 	    sctp_clog.x.misc.log1,
388 	    sctp_clog.x.misc.log2,
389 	    sctp_clog.x.misc.log3,
390 	    sctp_clog.x.misc.log4);
391 }
392 
393 void
394 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
395 {
396 	struct sctp_cwnd_log sctp_clog;
397 
398 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
399 	sctp_clog.x.rwnd.send_size = snd_size;
400 	sctp_clog.x.rwnd.overhead = overhead;
401 	sctp_clog.x.rwnd.new_rwnd = 0;
402 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
403 	    SCTP_LOG_EVENT_RWND,
404 	    from,
405 	    sctp_clog.x.misc.log1,
406 	    sctp_clog.x.misc.log2,
407 	    sctp_clog.x.misc.log3,
408 	    sctp_clog.x.misc.log4);
409 }
410 
411 void
412 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
413 {
414 	struct sctp_cwnd_log sctp_clog;
415 
416 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
417 	sctp_clog.x.rwnd.send_size = flight_size;
418 	sctp_clog.x.rwnd.overhead = overhead;
419 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
420 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
421 	    SCTP_LOG_EVENT_RWND,
422 	    from,
423 	    sctp_clog.x.misc.log1,
424 	    sctp_clog.x.misc.log2,
425 	    sctp_clog.x.misc.log3,
426 	    sctp_clog.x.misc.log4);
427 }
428 
429 #ifdef SCTP_MBCNT_LOGGING
430 static void
431 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
432 {
433 	struct sctp_cwnd_log sctp_clog;
434 
435 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
436 	sctp_clog.x.mbcnt.size_change = book;
437 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
438 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
439 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
440 	    SCTP_LOG_EVENT_MBCNT,
441 	    from,
442 	    sctp_clog.x.misc.log1,
443 	    sctp_clog.x.misc.log2,
444 	    sctp_clog.x.misc.log3,
445 	    sctp_clog.x.misc.log4);
446 }
447 
448 #endif
449 
/*
 * Log four caller-supplied 32-bit values as a generic "misc" trace event;
 * 'from' identifies the call site.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
458 
459 void
460 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
461 {
462 	struct sctp_cwnd_log sctp_clog;
463 
464 	sctp_clog.x.wake.stcb = (void *)stcb;
465 	sctp_clog.x.wake.wake_cnt = wake_cnt;
466 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
467 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
468 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
469 
470 	if (stcb->asoc.stream_queue_cnt < 0xff)
471 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
472 	else
473 		sctp_clog.x.wake.stream_qcnt = 0xff;
474 
475 	if (stcb->asoc.chunks_on_out_queue < 0xff)
476 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
477 	else
478 		sctp_clog.x.wake.chunks_on_oque = 0xff;
479 
480 	sctp_clog.x.wake.sctpflags = 0;
481 	/* set in the defered mode stuff */
482 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
483 		sctp_clog.x.wake.sctpflags |= 1;
484 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
485 		sctp_clog.x.wake.sctpflags |= 2;
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
487 		sctp_clog.x.wake.sctpflags |= 4;
488 	/* what about the sb */
489 	if (stcb->sctp_socket) {
490 		struct socket *so = stcb->sctp_socket;
491 
492 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
493 	} else {
494 		sctp_clog.x.wake.sbflags = 0xff;
495 	}
496 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
497 	    SCTP_LOG_EVENT_WAKE,
498 	    from,
499 	    sctp_clog.x.misc.log1,
500 	    sctp_clog.x.misc.log2,
501 	    sctp_clog.x.misc.log3,
502 	    sctp_clog.x.misc.log4);
503 }
504 
505 void
506 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
507 {
508 	struct sctp_cwnd_log sctp_clog;
509 
510 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
511 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
512 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
513 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
514 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
515 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
516 	sctp_clog.x.blk.sndlen = sendlen;
517 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
518 	    SCTP_LOG_EVENT_BLOCK,
519 	    from,
520 	    sctp_clog.x.misc.log1,
521 	    sctp_clog.x.misc.log2,
522 	    sctp_clog.x.misc.log3,
523 	    sctp_clog.x.misc.log4);
524 }
525 
/*
 * Stub kept for the SCTP_GET_STAT_LOG socket option: statistics are now
 * retrieved via the KTR trace buffer (ktrdump) rather than copied out
 * here, so this always reports success without touching optval/optsize.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
532 
533 #ifdef SCTP_AUDITING_ENABLED
534 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
535 static int sctp_audit_indx = 0;
536 
537 static
538 void
539 sctp_print_audit_report(void)
540 {
541 	int i;
542 	int cnt;
543 
544 	cnt = 0;
545 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
546 		if ((sctp_audit_data[i][0] == 0xe0) &&
547 		    (sctp_audit_data[i][1] == 0x01)) {
548 			cnt = 0;
549 			SCTP_PRINTF("\n");
550 		} else if (sctp_audit_data[i][0] == 0xf0) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
554 		    (sctp_audit_data[i][1] == 0x01)) {
555 			SCTP_PRINTF("\n");
556 			cnt = 0;
557 		}
558 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
559 		    (uint32_t) sctp_audit_data[i][1]);
560 		cnt++;
561 		if ((cnt % 14) == 0)
562 			SCTP_PRINTF("\n");
563 	}
564 	for (i = 0; i < sctp_audit_indx; i++) {
565 		if ((sctp_audit_data[i][0] == 0xe0) &&
566 		    (sctp_audit_data[i][1] == 0x01)) {
567 			cnt = 0;
568 			SCTP_PRINTF("\n");
569 		} else if (sctp_audit_data[i][0] == 0xf0) {
570 			cnt = 0;
571 			SCTP_PRINTF("\n");
572 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
573 		    (sctp_audit_data[i][1] == 0x01)) {
574 			SCTP_PRINTF("\n");
575 			cnt = 0;
576 		}
577 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
578 		    (uint32_t) sctp_audit_data[i][1]);
579 		cnt++;
580 		if ((cnt % 14) == 0)
581 			SCTP_PRINTF("\n");
582 	}
583 	SCTP_PRINTF("\n");
584 }
585 
586 void
587 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
588     struct sctp_nets *net)
589 {
590 	int resend_cnt, tot_out, rep, tot_book_cnt;
591 	struct sctp_nets *lnet;
592 	struct sctp_tmit_chunk *chk;
593 
594 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
595 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
596 	sctp_audit_indx++;
597 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
598 		sctp_audit_indx = 0;
599 	}
600 	if (inp == NULL) {
601 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
602 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
603 		sctp_audit_indx++;
604 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
605 			sctp_audit_indx = 0;
606 		}
607 		return;
608 	}
609 	if (stcb == NULL) {
610 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
611 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
612 		sctp_audit_indx++;
613 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
614 			sctp_audit_indx = 0;
615 		}
616 		return;
617 	}
618 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
619 	sctp_audit_data[sctp_audit_indx][1] =
620 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
621 	sctp_audit_indx++;
622 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
623 		sctp_audit_indx = 0;
624 	}
625 	rep = 0;
626 	tot_book_cnt = 0;
627 	resend_cnt = tot_out = 0;
628 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
629 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
630 			resend_cnt++;
631 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
632 			tot_out += chk->book_size;
633 			tot_book_cnt++;
634 		}
635 	}
636 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
637 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
638 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
639 		sctp_audit_indx++;
640 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
641 			sctp_audit_indx = 0;
642 		}
643 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
644 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
645 		rep = 1;
646 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
647 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
648 		sctp_audit_data[sctp_audit_indx][1] =
649 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
650 		sctp_audit_indx++;
651 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
652 			sctp_audit_indx = 0;
653 		}
654 	}
655 	if (tot_out != stcb->asoc.total_flight) {
656 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
657 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
658 		sctp_audit_indx++;
659 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
660 			sctp_audit_indx = 0;
661 		}
662 		rep = 1;
663 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
664 		    (int)stcb->asoc.total_flight);
665 		stcb->asoc.total_flight = tot_out;
666 	}
667 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
668 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
669 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
670 		sctp_audit_indx++;
671 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
672 			sctp_audit_indx = 0;
673 		}
674 		rep = 1;
675 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
676 
677 		stcb->asoc.total_flight_count = tot_book_cnt;
678 	}
679 	tot_out = 0;
680 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
681 		tot_out += lnet->flight_size;
682 	}
683 	if (tot_out != stcb->asoc.total_flight) {
684 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
685 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
686 		sctp_audit_indx++;
687 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
688 			sctp_audit_indx = 0;
689 		}
690 		rep = 1;
691 		SCTP_PRINTF("real flight:%d net total was %d\n",
692 		    stcb->asoc.total_flight, tot_out);
693 		/* now corrective action */
694 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
695 
696 			tot_out = 0;
697 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
698 				if ((chk->whoTo == lnet) &&
699 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
700 					tot_out += chk->book_size;
701 				}
702 			}
703 			if (lnet->flight_size != tot_out) {
704 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
705 				    (void *)lnet, lnet->flight_size,
706 				    tot_out);
707 				lnet->flight_size = tot_out;
708 			}
709 		}
710 	}
711 	if (rep) {
712 		sctp_print_audit_report();
713 	}
714 }
715 
716 void
717 sctp_audit_log(uint8_t ev, uint8_t fd)
718 {
719 
720 	sctp_audit_data[sctp_audit_indx][0] = ev;
721 	sctp_audit_data[sctp_audit_indx][1] = fd;
722 	sctp_audit_indx++;
723 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
724 		sctp_audit_indx = 0;
725 	}
726 }
727 
728 #endif
729 
730 /*
731  * sctp_stop_timers_for_shutdown() should be called
732  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
733  * state to make sure that all timers are stopped.
734  */
735 void
736 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
737 {
738 	struct sctp_association *asoc;
739 	struct sctp_nets *net;
740 
741 	asoc = &stcb->asoc;
742 
743 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
744 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
745 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
746 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
747 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
748 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
749 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
750 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
751 	}
752 }
753 
754 /*
755  * a list of sizes based on typical mtu's, used only if next hop size not
756  * returned.
757  */
758 static uint32_t sctp_mtu_sizes[] = {
759 	68,
760 	296,
761 	508,
762 	512,
763 	544,
764 	576,
765 	1006,
766 	1492,
767 	1500,
768 	1536,
769 	2002,
770 	2048,
771 	4352,
772 	4464,
773 	8166,
774 	17914,
775 	32000,
776 	65535
777 };
778 
779 /*
780  * Return the largest MTU smaller than val. If there is no
781  * entry, just return val.
782  */
783 uint32_t
784 sctp_get_prev_mtu(uint32_t val)
785 {
786 	uint32_t i;
787 
788 	if (val <= sctp_mtu_sizes[0]) {
789 		return (val);
790 	}
791 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
792 		if (val <= sctp_mtu_sizes[i]) {
793 			break;
794 		}
795 	}
796 	return (sctp_mtu_sizes[i - 1]);
797 }
798 
799 /*
800  * Return the smallest MTU larger than val. If there is no
801  * entry, just return val.
802  */
803 uint32_t
804 sctp_get_next_mtu(uint32_t val)
805 {
806 	/* select another MTU that is just bigger than this one */
807 	uint32_t i;
808 
809 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
810 		if (val < sctp_mtu_sizes[i]) {
811 			return (sctp_mtu_sizes[i]);
812 		}
813 	}
814 	return (val);
815 }
816 
/*
 * Refill the per-endpoint random-number store by HMACing the stored
 * random seed with a monotonically increasing counter, then reset the
 * read cursor (store_at) to the beginning.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	/* bump the counter so the next refill produces different output */
	m->random_counter++;
}
835 
836 uint32_t
837 sctp_select_initial_TSN(struct sctp_pcb *inp)
838 {
839 	/*
840 	 * A true implementation should use random selection process to get
841 	 * the initial stream sequence number, using RFC1750 as a good
842 	 * guideline
843 	 */
844 	uint32_t x, *xp;
845 	uint8_t *p;
846 	int store_at, new_store;
847 
848 	if (inp->initial_sequence_debug != 0) {
849 		uint32_t ret;
850 
851 		ret = inp->initial_sequence_debug;
852 		inp->initial_sequence_debug++;
853 		return (ret);
854 	}
855 retry:
856 	store_at = inp->store_at;
857 	new_store = store_at + sizeof(uint32_t);
858 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
859 		new_store = 0;
860 	}
861 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
862 		goto retry;
863 	}
864 	if (new_store == 0) {
865 		/* Refill the random store */
866 		sctp_fill_random_store(inp);
867 	}
868 	p = &inp->random_store[store_at];
869 	xp = (uint32_t *) p;
870 	x = *xp;
871 	return (x);
872 }
873 
874 uint32_t
875 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
876 {
877 	uint32_t x;
878 	struct timeval now;
879 
880 	if (check) {
881 		(void)SCTP_GETTIME_TIMEVAL(&now);
882 	}
883 	for (;;) {
884 		x = sctp_select_initial_TSN(&inp->sctp_ep);
885 		if (x == 0) {
886 			/* we never use 0 */
887 			continue;
888 		}
889 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
890 			break;
891 		}
892 	}
893 	return (x);
894 }
895 
896 int32_t
897 sctp_map_assoc_state(int kernel_state)
898 {
899 	int32_t user_state;
900 
901 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
902 		user_state = SCTP_CLOSED;
903 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
904 		user_state = SCTP_SHUTDOWN_PENDING;
905 	} else {
906 		switch (kernel_state & SCTP_STATE_MASK) {
907 		case SCTP_STATE_EMPTY:
908 			user_state = SCTP_CLOSED;
909 			break;
910 		case SCTP_STATE_INUSE:
911 			user_state = SCTP_CLOSED;
912 			break;
913 		case SCTP_STATE_COOKIE_WAIT:
914 			user_state = SCTP_COOKIE_WAIT;
915 			break;
916 		case SCTP_STATE_COOKIE_ECHOED:
917 			user_state = SCTP_COOKIE_ECHOED;
918 			break;
919 		case SCTP_STATE_OPEN:
920 			user_state = SCTP_ESTABLISHED;
921 			break;
922 		case SCTP_STATE_SHUTDOWN_SENT:
923 			user_state = SCTP_SHUTDOWN_SENT;
924 			break;
925 		case SCTP_STATE_SHUTDOWN_RECEIVED:
926 			user_state = SCTP_SHUTDOWN_RECEIVED;
927 			break;
928 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
929 			user_state = SCTP_SHUTDOWN_ACK_SENT;
930 			break;
931 		default:
932 			user_state = SCTP_CLOSED;
933 			break;
934 		}
935 	}
936 	return (user_state);
937 }
938 
939 int
940 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
941     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
942 {
943 	struct sctp_association *asoc;
944 
945 	/*
946 	 * Anything set to zero is taken care of by the allocation routine's
947 	 * bzero
948 	 */
949 
950 	/*
951 	 * Up front select what scoping to apply on addresses I tell my peer
952 	 * Not sure what to do with these right now, we will need to come up
953 	 * with a way to set them. We may need to pass them through from the
954 	 * caller in the sctp_aloc_assoc() function.
955 	 */
956 	int i;
957 
958 #if defined(SCTP_DETAILED_STR_STATS)
959 	int j;
960 
961 #endif
962 
963 	asoc = &stcb->asoc;
964 	/* init all variables to a known value. */
965 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
966 	asoc->max_burst = inp->sctp_ep.max_burst;
967 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
968 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
969 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
970 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
971 	asoc->ecn_supported = inp->ecn_supported;
972 	asoc->prsctp_supported = inp->prsctp_supported;
973 	asoc->auth_supported = inp->auth_supported;
974 	asoc->asconf_supported = inp->asconf_supported;
975 	asoc->reconfig_supported = inp->reconfig_supported;
976 	asoc->nrsack_supported = inp->nrsack_supported;
977 	asoc->pktdrop_supported = inp->pktdrop_supported;
978 	asoc->sctp_cmt_pf = (uint8_t) 0;
979 	asoc->sctp_frag_point = inp->sctp_frag_point;
980 	asoc->sctp_features = inp->sctp_features;
981 	asoc->default_dscp = inp->sctp_ep.default_dscp;
982 	asoc->max_cwnd = inp->max_cwnd;
983 #ifdef INET6
984 	if (inp->sctp_ep.default_flowlabel) {
985 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
986 	} else {
987 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
988 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
989 			asoc->default_flowlabel &= 0x000fffff;
990 			asoc->default_flowlabel |= 0x80000000;
991 		} else {
992 			asoc->default_flowlabel = 0;
993 		}
994 	}
995 #endif
996 	asoc->sb_send_resv = 0;
997 	if (override_tag) {
998 		asoc->my_vtag = override_tag;
999 	} else {
1000 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1001 	}
1002 	/* Get the nonce tags */
1003 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1004 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1005 	asoc->vrf_id = vrf_id;
1006 
1007 #ifdef SCTP_ASOCLOG_OF_TSNS
1008 	asoc->tsn_in_at = 0;
1009 	asoc->tsn_out_at = 0;
1010 	asoc->tsn_in_wrapped = 0;
1011 	asoc->tsn_out_wrapped = 0;
1012 	asoc->cumack_log_at = 0;
1013 	asoc->cumack_log_atsnt = 0;
1014 #endif
1015 #ifdef SCTP_FS_SPEC_LOG
1016 	asoc->fs_index = 0;
1017 #endif
1018 	asoc->refcnt = 0;
1019 	asoc->assoc_up_sent = 0;
1020 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1021 	    sctp_select_initial_TSN(&inp->sctp_ep);
1022 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1023 	/* we are optimisitic here */
1024 	asoc->peer_supports_nat = 0;
1025 	asoc->sent_queue_retran_cnt = 0;
1026 
1027 	/* for CMT */
1028 	asoc->last_net_cmt_send_started = NULL;
1029 
1030 	/* This will need to be adjusted */
1031 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1032 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1033 	asoc->asconf_seq_in = asoc->last_acked_seq;
1034 
1035 	/* here we are different, we hold the next one we expect */
1036 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1037 
1038 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1039 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1040 
1041 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1042 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1043 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1044 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1045 	asoc->free_chunk_cnt = 0;
1046 
1047 	asoc->iam_blocking = 0;
1048 	asoc->context = inp->sctp_context;
1049 	asoc->local_strreset_support = inp->local_strreset_support;
1050 	asoc->def_send = inp->def_send;
1051 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1052 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1053 	asoc->pr_sctp_cnt = 0;
1054 	asoc->total_output_queue_size = 0;
1055 
1056 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1057 		asoc->scope.ipv6_addr_legal = 1;
1058 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1059 			asoc->scope.ipv4_addr_legal = 1;
1060 		} else {
1061 			asoc->scope.ipv4_addr_legal = 0;
1062 		}
1063 	} else {
1064 		asoc->scope.ipv6_addr_legal = 0;
1065 		asoc->scope.ipv4_addr_legal = 1;
1066 	}
1067 
1068 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1069 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1070 
1071 	asoc->smallest_mtu = inp->sctp_frag_point;
1072 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1073 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1074 
1075 	asoc->locked_on_sending = NULL;
1076 	asoc->stream_locked_on = 0;
1077 	asoc->ecn_echo_cnt_onq = 0;
1078 	asoc->stream_locked = 0;
1079 
1080 	asoc->send_sack = 1;
1081 
1082 	LIST_INIT(&asoc->sctp_restricted_addrs);
1083 
1084 	TAILQ_INIT(&asoc->nets);
1085 	TAILQ_INIT(&asoc->pending_reply_queue);
1086 	TAILQ_INIT(&asoc->asconf_ack_sent);
1087 	/* Setup to fill the hb random cache at first HB */
1088 	asoc->hb_random_idx = 4;
1089 
1090 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1091 
1092 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1093 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1094 
1095 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1096 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1097 
1098 	/*
1099 	 * Now the stream parameters, here we allocate space for all streams
1100 	 * that we request by default.
1101 	 */
1102 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1103 	    o_strms;
1104 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1105 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1106 	    SCTP_M_STRMO);
1107 	if (asoc->strmout == NULL) {
1108 		/* big trouble no memory */
1109 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1110 		return (ENOMEM);
1111 	}
1112 	for (i = 0; i < asoc->streamoutcnt; i++) {
1113 		/*
1114 		 * inbound side must be set to 0xffff, also NOTE when we get
1115 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1116 		 * count (streamoutcnt) but first check if we sent to any of
1117 		 * the upper streams that were dropped (if some were). Those
1118 		 * that were dropped must be notified to the upper layer as
1119 		 * failed to send.
1120 		 */
1121 		asoc->strmout[i].next_sequence_send = 0x0;
1122 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1123 		asoc->strmout[i].chunks_on_queues = 0;
1124 #if defined(SCTP_DETAILED_STR_STATS)
1125 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1126 			asoc->strmout[i].abandoned_sent[j] = 0;
1127 			asoc->strmout[i].abandoned_unsent[j] = 0;
1128 		}
1129 #else
1130 		asoc->strmout[i].abandoned_sent[0] = 0;
1131 		asoc->strmout[i].abandoned_unsent[0] = 0;
1132 #endif
1133 		asoc->strmout[i].stream_no = i;
1134 		asoc->strmout[i].last_msg_incomplete = 0;
1135 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1136 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1137 	}
1138 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1139 
1140 	/* Now the mapping array */
1141 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1142 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1143 	    SCTP_M_MAP);
1144 	if (asoc->mapping_array == NULL) {
1145 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1146 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1147 		return (ENOMEM);
1148 	}
1149 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1150 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1151 	    SCTP_M_MAP);
1152 	if (asoc->nr_mapping_array == NULL) {
1153 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1154 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1155 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1156 		return (ENOMEM);
1157 	}
1158 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1159 
1160 	/* Now the init of the other outqueues */
1161 	TAILQ_INIT(&asoc->free_chunks);
1162 	TAILQ_INIT(&asoc->control_send_queue);
1163 	TAILQ_INIT(&asoc->asconf_send_queue);
1164 	TAILQ_INIT(&asoc->send_queue);
1165 	TAILQ_INIT(&asoc->sent_queue);
1166 	TAILQ_INIT(&asoc->reasmqueue);
1167 	TAILQ_INIT(&asoc->resetHead);
1168 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1169 	TAILQ_INIT(&asoc->asconf_queue);
1170 	/* authentication fields */
1171 	asoc->authinfo.random = NULL;
1172 	asoc->authinfo.active_keyid = 0;
1173 	asoc->authinfo.assoc_key = NULL;
1174 	asoc->authinfo.assoc_keyid = 0;
1175 	asoc->authinfo.recv_key = NULL;
1176 	asoc->authinfo.recv_keyid = 0;
1177 	LIST_INIT(&asoc->shared_keys);
1178 	asoc->marked_retrans = 0;
1179 	asoc->port = inp->sctp_ep.port;
1180 	asoc->timoinit = 0;
1181 	asoc->timodata = 0;
1182 	asoc->timosack = 0;
1183 	asoc->timoshutdown = 0;
1184 	asoc->timoheartbeat = 0;
1185 	asoc->timocookie = 0;
1186 	asoc->timoshutdownack = 0;
1187 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1188 	asoc->discontinuity_time = asoc->start_time;
1189 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1190 		asoc->abandoned_unsent[i] = 0;
1191 		asoc->abandoned_sent[i] = 0;
1192 	}
1193 	/*
1194 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1195 	 * freed later when the association is freed.
1196 	 */
1197 	return (0);
1198 }
1199 
1200 void
1201 sctp_print_mapping_array(struct sctp_association *asoc)
1202 {
1203 	unsigned int i, limit;
1204 
1205 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1206 	    asoc->mapping_array_size,
1207 	    asoc->mapping_array_base_tsn,
1208 	    asoc->cumulative_tsn,
1209 	    asoc->highest_tsn_inside_map,
1210 	    asoc->highest_tsn_inside_nr_map);
1211 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1212 		if (asoc->mapping_array[limit - 1] != 0) {
1213 			break;
1214 		}
1215 	}
1216 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1217 	for (i = 0; i < limit; i++) {
1218 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1219 	}
1220 	if (limit % 16)
1221 		SCTP_PRINTF("\n");
1222 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1223 		if (asoc->nr_mapping_array[limit - 1]) {
1224 			break;
1225 		}
1226 	}
1227 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1228 	for (i = 0; i < limit; i++) {
1229 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1230 	}
1231 	if (limit % 16)
1232 		SCTP_PRINTF("\n");
1233 }
1234 
1235 int
1236 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1237 {
1238 	/* mapping array needs to grow */
1239 	uint8_t *new_array1, *new_array2;
1240 	uint32_t new_size;
1241 
1242 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1243 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1244 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1245 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1246 		/* can't get more, forget it */
1247 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1248 		if (new_array1) {
1249 			SCTP_FREE(new_array1, SCTP_M_MAP);
1250 		}
1251 		if (new_array2) {
1252 			SCTP_FREE(new_array2, SCTP_M_MAP);
1253 		}
1254 		return (-1);
1255 	}
1256 	memset(new_array1, 0, new_size);
1257 	memset(new_array2, 0, new_size);
1258 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1259 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1260 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1261 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1262 	asoc->mapping_array = new_array1;
1263 	asoc->nr_mapping_array = new_array2;
1264 	asoc->mapping_array_size = new_size;
1265 	return (0);
1266 }
1267 
1268 
/*
 * Run one iterator to completion.  Walks the endpoint list (or only the
 * single endpoint it->inp when SCTP_ITERATOR_DO_SINGLE_INP is set),
 * filtering endpoints by it->pcb_flags / it->pcb_features and
 * associations by it->asoc_state, and invokes the iterator callbacks:
 * function_inp once per endpoint, function_assoc per matching
 * association, function_inp_end when an endpoint is finished, and
 * function_atend when the whole iteration ends.  Holds the INP-INFO
 * read lock and the global ITERATOR lock throughout, pausing every
 * SCTP_ITERATOR_MAX_AT_ONCE associations to let other threads in.
 * Frees "it" before returning.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/*
		 * Drop the reference held on the starting endpoint —
		 * presumably taken when the iterator was queued; TODO
		 * confirm against sctp_initiate_iterator.
		 */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* On re-entry to this label the endpoint is not yet read-locked. */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Per-endpoint callback asked to skip, or no assocs at all. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Pin the current stcb/inp with refcounts, drop
			 * every lock, then re-take them.  While unlocked
			 * another thread may have posted a stop request
			 * in sctp_it_ctl.iterator_flags.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1416 
1417 void
1418 sctp_iterator_worker(void)
1419 {
1420 	struct sctp_iterator *it, *nit;
1421 
1422 	/* This function is called with the WQ lock in place */
1423 
1424 	sctp_it_ctl.iterator_running = 1;
1425 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1426 		sctp_it_ctl.cur_it = it;
1427 		/* now lets work on this one */
1428 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1429 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1430 		CURVNET_SET(it->vn);
1431 		sctp_iterator_work(it);
1432 		sctp_it_ctl.cur_it = NULL;
1433 		CURVNET_RESTORE();
1434 		SCTP_IPI_ITERATOR_WQ_LOCK();
1435 		/* sa_ignore FREED_MEMORY */
1436 	}
1437 	sctp_it_ctl.iterator_running = 0;
1438 	return;
1439 }
1440 
1441 
1442 static void
1443 sctp_handle_addr_wq(void)
1444 {
1445 	/* deal with the ADDR wq from the rtsock calls */
1446 	struct sctp_laddr *wi, *nwi;
1447 	struct sctp_asconf_iterator *asc;
1448 
1449 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1450 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1451 	if (asc == NULL) {
1452 		/* Try later, no memory */
1453 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1454 		    (struct sctp_inpcb *)NULL,
1455 		    (struct sctp_tcb *)NULL,
1456 		    (struct sctp_nets *)NULL);
1457 		return;
1458 	}
1459 	LIST_INIT(&asc->list_of_work);
1460 	asc->cnt = 0;
1461 
1462 	SCTP_WQ_ADDR_LOCK();
1463 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1464 		LIST_REMOVE(wi, sctp_nxt_addr);
1465 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1466 		asc->cnt++;
1467 	}
1468 	SCTP_WQ_ADDR_UNLOCK();
1469 
1470 	if (asc->cnt == 0) {
1471 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1472 	} else {
1473 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1474 		    sctp_asconf_iterator_stcb,
1475 		    NULL,	/* No ep end for boundall */
1476 		    SCTP_PCB_FLAGS_BOUNDALL,
1477 		    SCTP_PCB_ANY_FEATURES,
1478 		    SCTP_ASOC_ANY_STATE,
1479 		    (void *)asc, 0,
1480 		    sctp_asconf_iterator_end, NULL, 0);
1481 	}
1482 }
1483 
/*
 * Central callout handler for every SCTP timer type.  Validates the
 * timer (stale self pointer, invalid type, endpoint/association gone),
 * takes the references and locks the dispatched handler needs, switches
 * on the timer type, and finally releases whatever it still holds.
 * tmr->stopped_from is a breadcrumb recording how far validation got.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output;
	int type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	/*
	 * did_output defaults to 1; cases that do not emit chunks clear
	 * it so the ECN-echo fixup below is skipped.
	 */
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * A socket-less endpoint is being torn down; only the
		 * timer types listed below are still allowed to run.
		 */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the association while we validate it. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	type = tmr->type;
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped while this callout was in flight. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer (sctp_t3rxt_timer). */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission (sctp_t1init_timer). */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/* Re-arm only if HBs are still enabled for this path. */
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* Rotate the endpoint's cookie secret keys. */
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Shutdown took too long: abort the association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Shutdown guard timer expired");
		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Free the whole association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		/* Free the whole endpoint. */
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
	CURVNET_RESTORE();
}
1929 
/*
 * Arm the timer of type 't_type'.  Depending on the type the timer lives
 * in the endpoint (inp), the association (stcb), or a single destination
 * (net); the matching argument(s) must be non-NULL or the request is
 * silently ignored.  Most retransmission-style timers derive their
 * duration from the destination's RTO, falling back to the association's
 * initial RTO when no measurement exists yet (net->RTO == 0).  A timer
 * that is already pending is left running unchanged.
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t to_ticks;
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may run without an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp
		 * ususually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint32_t jitter;

			/* No HB while it is disabled on a confirmed address. */
			if ((net->dest_state & SCTP_ADDR_NOHB) &&
			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
				return;
			}
			if (net->RTO == 0) {
				to_ticks = stcb->asoc.initial_rto;
			} else {
				to_ticks = net->RTO;
			}
			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
			jitter = rndval % to_ticks;
			/* Spread the delay by +/- 50% of the RTO. */
			if (jitter >= (to_ticks >> 1)) {
				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
			} else {
				to_ticks = to_ticks - jitter;
			}
			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
			    !(net->dest_state & SCTP_ADDR_PF)) {
				to_ticks += net->heart_beat_delay;
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &net->hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was compelete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here ususually about 60
		 * minutes.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Timer used to tear down a dying association. */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU ususually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		/* Skip destinations with path-MTU discovery disabled. */
		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if (stcb == NULL) {
			return;
		}
		/* Unconfigured guard time defaults to 5 * max RTO. */
		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
		} else {
			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		/* Needs an association but explicitly no net argument. */
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __func__, t_type);
		return;
		break;
	}
	/* Sanity: we need both a timer target and a positive duration. */
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __func__, t_type, to_ticks, (void *)tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	tmr->stopped_from = 0;
	tmr->type = t_type;
	/* Record the context the timeout handler will run against. */
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
	tmr->vnet = (void *)curvnet;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}
2218 
/*
 * Stop (disarm) the timer of type 't_type'.  The inp/stcb/net arguments
 * select the structure holding the timer, mirroring sctp_timer_start().
 * 'from' identifies the call site and is stored in tmr->stopped_from for
 * debugging.  Several timer types share one sctp_timer structure; if the
 * structure is currently armed for a different type, it is left running.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may run without an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __func__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the outstanding SEND-timer count in sync (never below 0). */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2375 
2376 uint32_t
2377 sctp_calculate_len(struct mbuf *m)
2378 {
2379 	uint32_t tlen = 0;
2380 	struct mbuf *at;
2381 
2382 	at = m;
2383 	while (at) {
2384 		tlen += SCTP_BUF_LEN(at);
2385 		at = SCTP_BUF_NEXT(at);
2386 	}
2387 	return (tlen);
2388 }
2389 
2390 void
2391 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2392     struct sctp_association *asoc, uint32_t mtu)
2393 {
2394 	/*
2395 	 * Reset the P-MTU size on this association, this involves changing
2396 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2397 	 * allow the DF flag to be cleared.
2398 	 */
2399 	struct sctp_tmit_chunk *chk;
2400 	unsigned int eff_mtu, ovh;
2401 
2402 	asoc->smallest_mtu = mtu;
2403 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2404 		ovh = SCTP_MIN_OVERHEAD;
2405 	} else {
2406 		ovh = SCTP_MIN_V4_OVERHEAD;
2407 	}
2408 	eff_mtu = mtu - ovh;
2409 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2410 		if (chk->send_size > eff_mtu) {
2411 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2412 		}
2413 	}
2414 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2415 		if (chk->send_size > eff_mtu) {
2416 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2417 		}
2418 	}
2419 }
2420 
2421 
2422 /*
2423  * given an association and starting time of the current RTT period return
2424  * RTO in number of msecs net should point to the current network
2425  */
2426 
/*
 * Compute a new RTO (in ms) for 'net' from the RTT measured between
 * '*told' (start of the RTT period) and now, updating the smoothed RTT
 * state (lastsa/lastsv) per Van Jacobson.  'safe' selects whether the
 * timeval must be copied for alignment; 'rtt_from_sack' tells whether
 * the sample came from DATA/SACK or from control traffic (HB/INIT).
 */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe, int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* now = elapsed time since 'old' */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
	        (uint64_t) now.tv_usec;

	/* compute rtt in ms */
	rtt = (int32_t) (net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* Standard EWMA update of srtt and rttvar. */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	/* Variance must never collapse to zero. */
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	/* Latch the satellite-network determination exactly once. */
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2542 
2543 /*
2544  * return a pointer to a contiguous piece of data from the given mbuf chain
2545  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2546  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2547  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2548  */
2549 caddr_t
2550 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2551 {
2552 	uint32_t count;
2553 	uint8_t *ptr;
2554 
2555 	ptr = in_ptr;
2556 	if ((off < 0) || (len <= 0))
2557 		return (NULL);
2558 
2559 	/* find the desired start location */
2560 	while ((m != NULL) && (off > 0)) {
2561 		if (off < SCTP_BUF_LEN(m))
2562 			break;
2563 		off -= SCTP_BUF_LEN(m);
2564 		m = SCTP_BUF_NEXT(m);
2565 	}
2566 	if (m == NULL)
2567 		return (NULL);
2568 
2569 	/* is the current mbuf large enough (eg. contiguous)? */
2570 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2571 		return (mtod(m, caddr_t)+off);
2572 	} else {
2573 		/* else, it spans more than one mbuf, so save a temp copy... */
2574 		while ((m != NULL) && (len > 0)) {
2575 			count = min(SCTP_BUF_LEN(m) - off, len);
2576 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2577 			len -= count;
2578 			ptr += count;
2579 			off = 0;
2580 			m = SCTP_BUF_NEXT(m);
2581 		}
2582 		if ((m == NULL) && (len > 0))
2583 			return (NULL);
2584 		else
2585 			return ((caddr_t)in_ptr);
2586 	}
2587 }
2588 
2589 
2590 
2591 struct sctp_paramhdr *
2592 sctp_get_next_param(struct mbuf *m,
2593     int offset,
2594     struct sctp_paramhdr *pull,
2595     int pull_limit)
2596 {
2597 	/* This just provides a typed signature to Peter's Pull routine */
2598 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2599 	    (uint8_t *) pull));
2600 }
2601 
2602 
2603 struct mbuf *
2604 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2605 {
2606 	struct mbuf *m_last;
2607 	caddr_t dp;
2608 
2609 	if (padlen > 3) {
2610 		return (NULL);
2611 	}
2612 	if (padlen <= M_TRAILINGSPACE(m)) {
2613 		/*
2614 		 * The easy way. We hope the majority of the time we hit
2615 		 * here :)
2616 		 */
2617 		m_last = m;
2618 	} else {
2619 		/* Hard way we must grow the mbuf chain */
2620 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2621 		if (m_last == NULL) {
2622 			return (NULL);
2623 		}
2624 		SCTP_BUF_LEN(m_last) = 0;
2625 		SCTP_BUF_NEXT(m_last) = NULL;
2626 		SCTP_BUF_NEXT(m) = m_last;
2627 	}
2628 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2629 	SCTP_BUF_LEN(m_last) += padlen;
2630 	memset(dp, 0, padlen);
2631 	return (m_last);
2632 }
2633 
2634 struct mbuf *
2635 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2636 {
2637 	/* find the last mbuf in chain and pad it */
2638 	struct mbuf *m_at;
2639 
2640 	if (last_mbuf != NULL) {
2641 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2642 	} else {
2643 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2644 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2645 				return (sctp_add_pad_tombuf(m_at, padval));
2646 			}
2647 		}
2648 	}
2649 	return (NULL);
2650 }
2651 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification for 'stcb' to the socket's
 * receive queue (if the application enabled it) and, for 1-to-1 style
 * sockets on COMM_LOST/CANT_STR_ASSOC, set the socket error and wake any
 * sleepers.  'abort' (may be NULL) is the peer's ABORT chunk to copy into
 * the notification; 'from_peer' selects the errno reported.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	size_t notif_len, abort_len;
	unsigned int i;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
		} else {
			abort_len = 0;
		}
		/* Room for the feature list or the ABORT chunk, by state. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		/* Only fill sac_info if the larger allocation succeeded. */
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			/* not that we need this */
			control->tail_mbuf = m_notify;
			control->spec_flags = M_NOTIFICATION;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* Hold a ref across the unlock/relock to keep stcb alive. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore_locked(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2802 
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification for address 'sa' with
 * the given state/error to the socket's receive queue, if the
 * application enabled SCTP_PCB_FLAGS_RECVPADDREVNT.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address, normalizing its form for the application. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		/* Present IPv4 as v4-mapped IPv6 when the app asked for it. */
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2894 
2895 
/*
 * Deliver a send-failure notification for chunk 'chk' to the socket's
 * receive queue.  Emits the newer SCTP_SEND_FAILED_EVENT format when the
 * application enabled it, otherwise the legacy SCTP_SEND_FAILED format.
 * 'sent' distinguishes data that was put on the wire from data that was
 * never sent.  On success the chunk's data mbufs are stolen (chk->data
 * is set to NULL) and handed to the notification.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* Pick the notification format the application subscribed to. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		/* Reported length covers the user payload, not the header. */
		length += chk->send_size;
		length -= sizeof(struct sctp_data_chunk);
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		length += chk->send_size;
		length -= sizeof(struct sctp_data_chunk);
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3009 
3010 
/*
 * Deliver a SCTP_SEND_FAILED (old API) or SCTP_SEND_FAILED_EVENT (new API)
 * notification for a message that was still pending on the stream output
 * queue, i.e. was never turned into a DATA chunk.  Ownership of sp->data is
 * transferred to the notification mbuf chain (sp->data is set to NULL here);
 * the chain is freed if the notification cannot be queued.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* Size the header mbuf for whichever notification format is enabled. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* New-style notification (struct sctp_send_failed_event). */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		/* Reported length covers the event header plus the user data. */
		length += sp->length;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->stream;
		if (sp->some_taken) {
			/* part of the message was already moved to a chunk */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		/* Old-style notification (struct sctp_send_failed). */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		length += sp->length;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	/* Chain the (unsent) user data behind the notification header. */
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3111 
3112 
3113 
3114 static void
3115 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3116 {
3117 	struct mbuf *m_notify;
3118 	struct sctp_adaptation_event *sai;
3119 	struct sctp_queued_to_read *control;
3120 
3121 	if ((stcb == NULL) ||
3122 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3123 		/* event not enabled */
3124 		return;
3125 	}
3126 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3127 	if (m_notify == NULL)
3128 		/* no space left */
3129 		return;
3130 	SCTP_BUF_LEN(m_notify) = 0;
3131 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3132 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3133 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3134 	sai->sai_flags = 0;
3135 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3136 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3137 	sai->sai_assoc_id = sctp_get_associd(stcb);
3138 
3139 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3140 	SCTP_BUF_NEXT(m_notify) = NULL;
3141 
3142 	/* append to socket */
3143 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3144 	    0, 0, stcb->asoc.context, 0, 0, 0,
3145 	    m_notify);
3146 	if (control == NULL) {
3147 		/* no memory */
3148 		sctp_m_freem(m_notify);
3149 		return;
3150 	}
3151 	control->length = SCTP_BUF_LEN(m_notify);
3152 	control->spec_flags = M_NOTIFICATION;
3153 	/* not that we need this */
3154 	control->tail_mbuf = m_notify;
3155 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3156 	    control,
3157 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3158 }
3159 
3160 /* This always must be called with the read-queue LOCKED in the INP */
3161 static void
3162 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3163     uint32_t val, int so_locked
3164 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3165     SCTP_UNUSED
3166 #endif
3167 )
3168 {
3169 	struct mbuf *m_notify;
3170 	struct sctp_pdapi_event *pdapi;
3171 	struct sctp_queued_to_read *control;
3172 	struct sockbuf *sb;
3173 
3174 	if ((stcb == NULL) ||
3175 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3176 		/* event not enabled */
3177 		return;
3178 	}
3179 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3180 		return;
3181 	}
3182 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3183 	if (m_notify == NULL)
3184 		/* no space left */
3185 		return;
3186 	SCTP_BUF_LEN(m_notify) = 0;
3187 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3188 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3189 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3190 	pdapi->pdapi_flags = 0;
3191 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3192 	pdapi->pdapi_indication = error;
3193 	pdapi->pdapi_stream = (val >> 16);
3194 	pdapi->pdapi_seq = (val & 0x0000ffff);
3195 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3196 
3197 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3198 	SCTP_BUF_NEXT(m_notify) = NULL;
3199 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3200 	    0, 0, stcb->asoc.context, 0, 0, 0,
3201 	    m_notify);
3202 	if (control == NULL) {
3203 		/* no memory */
3204 		sctp_m_freem(m_notify);
3205 		return;
3206 	}
3207 	control->spec_flags = M_NOTIFICATION;
3208 	control->length = SCTP_BUF_LEN(m_notify);
3209 	/* not that we need this */
3210 	control->tail_mbuf = m_notify;
3211 	control->held_length = 0;
3212 	control->length = 0;
3213 	sb = &stcb->sctp_socket->so_rcv;
3214 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3215 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3216 	}
3217 	sctp_sballoc(stcb, sb, m_notify);
3218 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3219 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3220 	}
3221 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3222 	control->end_added = 1;
3223 	if (stcb->asoc.control_pdapi)
3224 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3225 	else {
3226 		/* we really should not see this case */
3227 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3228 	}
3229 	if (stcb->sctp_ep && stcb->sctp_socket) {
3230 		/* This should always be the case */
3231 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3232 		struct socket *so;
3233 
3234 		so = SCTP_INP_SO(stcb->sctp_ep);
3235 		if (!so_locked) {
3236 			atomic_add_int(&stcb->asoc.refcnt, 1);
3237 			SCTP_TCB_UNLOCK(stcb);
3238 			SCTP_SOCKET_LOCK(so, 1);
3239 			SCTP_TCB_LOCK(stcb);
3240 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3241 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3242 				SCTP_SOCKET_UNLOCK(so, 1);
3243 				return;
3244 			}
3245 		}
3246 #endif
3247 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3248 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3249 		if (!so_locked) {
3250 			SCTP_SOCKET_UNLOCK(so, 1);
3251 		}
3252 #endif
3253 	}
3254 }
3255 
/*
 * Deliver a SCTP_SHUTDOWN_EVENT notification.  For one-to-one style (TCP
 * model) sockets this also marks the socket as unable to send, waking up
 * any blocked writers, regardless of whether the user subscribed to the
 * shutdown event itself.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/* Take the socket lock, dropping the tcb lock to keep order. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3324 
3325 static void
3326 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3327     int so_locked
3328 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3329     SCTP_UNUSED
3330 #endif
3331 )
3332 {
3333 	struct mbuf *m_notify;
3334 	struct sctp_sender_dry_event *event;
3335 	struct sctp_queued_to_read *control;
3336 
3337 	if ((stcb == NULL) ||
3338 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3339 		/* event not enabled */
3340 		return;
3341 	}
3342 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3343 	if (m_notify == NULL) {
3344 		/* no space left */
3345 		return;
3346 	}
3347 	SCTP_BUF_LEN(m_notify) = 0;
3348 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3349 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3350 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3351 	event->sender_dry_flags = 0;
3352 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3353 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3354 
3355 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3356 	SCTP_BUF_NEXT(m_notify) = NULL;
3357 
3358 	/* append to socket */
3359 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3360 	    0, 0, stcb->asoc.context, 0, 0, 0,
3361 	    m_notify);
3362 	if (control == NULL) {
3363 		/* no memory */
3364 		sctp_m_freem(m_notify);
3365 		return;
3366 	}
3367 	control->length = SCTP_BUF_LEN(m_notify);
3368 	control->spec_flags = M_NOTIFICATION;
3369 	/* not that we need this */
3370 	control->tail_mbuf = m_notify;
3371 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3372 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3373 }
3374 
3375 
3376 void
3377 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3378 {
3379 	struct mbuf *m_notify;
3380 	struct sctp_queued_to_read *control;
3381 	struct sctp_stream_change_event *stradd;
3382 
3383 	if ((stcb == NULL) ||
3384 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3385 		/* event not enabled */
3386 		return;
3387 	}
3388 	if ((stcb->asoc.peer_req_out) && flag) {
3389 		/* Peer made the request, don't tell the local user */
3390 		stcb->asoc.peer_req_out = 0;
3391 		return;
3392 	}
3393 	stcb->asoc.peer_req_out = 0;
3394 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3395 	if (m_notify == NULL)
3396 		/* no space left */
3397 		return;
3398 	SCTP_BUF_LEN(m_notify) = 0;
3399 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3400 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3401 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3402 	stradd->strchange_flags = flag;
3403 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3404 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3405 	stradd->strchange_instrms = numberin;
3406 	stradd->strchange_outstrms = numberout;
3407 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3408 	SCTP_BUF_NEXT(m_notify) = NULL;
3409 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3410 		/* no space */
3411 		sctp_m_freem(m_notify);
3412 		return;
3413 	}
3414 	/* append to socket */
3415 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3416 	    0, 0, stcb->asoc.context, 0, 0, 0,
3417 	    m_notify);
3418 	if (control == NULL) {
3419 		/* no memory */
3420 		sctp_m_freem(m_notify);
3421 		return;
3422 	}
3423 	control->spec_flags = M_NOTIFICATION;
3424 	control->length = SCTP_BUF_LEN(m_notify);
3425 	/* not that we need this */
3426 	control->tail_mbuf = m_notify;
3427 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3428 	    control,
3429 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3430 }
3431 
3432 void
3433 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3434 {
3435 	struct mbuf *m_notify;
3436 	struct sctp_queued_to_read *control;
3437 	struct sctp_assoc_reset_event *strasoc;
3438 
3439 	if ((stcb == NULL) ||
3440 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3441 		/* event not enabled */
3442 		return;
3443 	}
3444 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3445 	if (m_notify == NULL)
3446 		/* no space left */
3447 		return;
3448 	SCTP_BUF_LEN(m_notify) = 0;
3449 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3450 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3451 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3452 	strasoc->assocreset_flags = flag;
3453 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3454 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3455 	strasoc->assocreset_local_tsn = sending_tsn;
3456 	strasoc->assocreset_remote_tsn = recv_tsn;
3457 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3458 	SCTP_BUF_NEXT(m_notify) = NULL;
3459 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3460 		/* no space */
3461 		sctp_m_freem(m_notify);
3462 		return;
3463 	}
3464 	/* append to socket */
3465 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3466 	    0, 0, stcb->asoc.context, 0, 0, 0,
3467 	    m_notify);
3468 	if (control == NULL) {
3469 		/* no memory */
3470 		sctp_m_freem(m_notify);
3471 		return;
3472 	}
3473 	control->spec_flags = M_NOTIFICATION;
3474 	control->length = SCTP_BUF_LEN(m_notify);
3475 	/* not that we need this */
3476 	control->tail_mbuf = m_notify;
3477 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3478 	    control,
3479 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3480 }
3481 
3482 
3483 
3484 static void
3485 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3486     int number_entries, uint16_t * list, int flag)
3487 {
3488 	struct mbuf *m_notify;
3489 	struct sctp_queued_to_read *control;
3490 	struct sctp_stream_reset_event *strreset;
3491 	int len;
3492 
3493 	if ((stcb == NULL) ||
3494 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3495 		/* event not enabled */
3496 		return;
3497 	}
3498 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3499 	if (m_notify == NULL)
3500 		/* no space left */
3501 		return;
3502 	SCTP_BUF_LEN(m_notify) = 0;
3503 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3504 	if (len > M_TRAILINGSPACE(m_notify)) {
3505 		/* never enough room */
3506 		sctp_m_freem(m_notify);
3507 		return;
3508 	}
3509 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3510 	memset(strreset, 0, len);
3511 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3512 	strreset->strreset_flags = flag;
3513 	strreset->strreset_length = len;
3514 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3515 	if (number_entries) {
3516 		int i;
3517 
3518 		for (i = 0; i < number_entries; i++) {
3519 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3520 		}
3521 	}
3522 	SCTP_BUF_LEN(m_notify) = len;
3523 	SCTP_BUF_NEXT(m_notify) = NULL;
3524 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3525 		/* no space */
3526 		sctp_m_freem(m_notify);
3527 		return;
3528 	}
3529 	/* append to socket */
3530 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3531 	    0, 0, stcb->asoc.context, 0, 0, 0,
3532 	    m_notify);
3533 	if (control == NULL) {
3534 		/* no memory */
3535 		sctp_m_freem(m_notify);
3536 		return;
3537 	}
3538 	control->spec_flags = M_NOTIFICATION;
3539 	control->length = SCTP_BUF_LEN(m_notify);
3540 	/* not that we need this */
3541 	control->tail_mbuf = m_notify;
3542 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3543 	    control,
3544 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3545 }
3546 
3547 
/*
 * Deliver a SCTP_REMOTE_ERROR notification carrying the peer's ERROR chunk
 * (if any).  If an mbuf large enough for header plus chunk cannot be
 * allocated, retry with just the header and drop the chunk payload.
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	size_t notif_len, chunk_len;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		/* event not enabled */
		return;
	}
	if (chunk != NULL) {
		/* chunk_length is in network byte order on the wire */
		chunk_len = ntohs(chunk->ch.chunk_length);
	} else {
		chunk_len = 0;
	}
	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		notif_len = sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	memset(sre, 0, notif_len);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	/*
	 * Only copy the chunk if the full-size allocation succeeded; on the
	 * retry path notif_len was shrunk to the bare header size.
	 */
	if (notif_len > sizeof(struct sctp_remote_error)) {
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		/* not that we need this */
		control->tail_mbuf = m_notify;
		control->spec_flags = M_NOTIFICATION;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		/* no memory for the readq entry */
		sctp_m_freem(m_notify);
	}
}
3604 
3605 
/*
 * Central dispatcher for upper-layer (socket) notifications.  Maps an
 * internal SCTP_NOTIFY_* code (plus an opaque data pointer whose meaning
 * depends on the code: a net, chunk, stream-queue entry, stream list, ...)
 * to the matching sctp_notify_*() routine.  Notifications are suppressed
 * when the socket is gone/closed, cannot receive, or — for interface
 * events — while the association is still in a front (handshake) state.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* reader side has been shut down */
		return;
	}
	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is only reported once per association */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			/* data is the affected sctp_nets */
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* data is a stream-queue entry that was never chunked */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			/* data packs stream id (high 16 bits) and seq (low) */
			uint32_t val;

			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		/* for stream-reset cases, error carries the entry count */
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* data is the key number, smuggled through the pointer */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __func__, notification, notification);
		break;
	}			/* end switch */
}
3785 
/*
 * Drain every outbound queue of an association that is being torn down:
 * the sent queue, the send queue, and each stream's pending-message queue.
 * For every entry that still has data, a send-failed notification is
 * raised via sctp_ulp_notify() before the chunk/message is freed.
 * holds_lock indicates whether the caller already holds the TCB send lock.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		/* nobody left to notify */
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		/* NR-acked chunks were already subtracted from the count */
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* notify, then free the data the notify did not take */
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				/* notify may consume sp->data; free any leftover */
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3897 
/*
 * Notify the ULP that the association identified by stcb was aborted,
 * either by the peer (from_peer != 0 -> SCTP_NOTIFY_ASSOC_REM_ABORTED)
 * or locally (SCTP_NOTIFY_ASSOC_LOC_ABORTED).  All outbound data still
 * queued is reported as failed first.  'abort' optionally carries the
 * ABORT chunk for the notification; 'error' is the cause code passed
 * through to the notifications.  A no-op if stcb is NULL or the socket
 * is already gone/closed.
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
    struct sctp_abort_chunk *abort, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if (stcb == NULL) {
		return;
	}
	/*
	 * For 1-to-1 style endpoints remember that the connection was
	 * aborted; this must happen even when the notifications below
	 * are suppressed because the socket is going away.
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	/* Nobody is left to receive notifications: stop here. */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, error, 1, so_locked);
	if (from_peer) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
	} else {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
	}
}
3927 
/*
 * Abort an association in response to an inbound packet: notify the
 * ULP (if we have a TCB), send an ABORT back to the peer built from
 * the received packet (m/iphlen/src/dst/sh) with optional error cause
 * op_err, then free the TCB.  When stcb is NULL only the ABORT is
 * sent (out-of-the-blue style, using vtag 0).  The caller's TCB lock
 * is consumed: the stcb is freed before return.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: take a reference so the assoc
		 * cannot vanish while the TCB lock is dropped to
		 * acquire the socket lock first.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* Only established assocs count against currestab. */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3977 
3978 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Dump the association's inbound and outbound TSN track logs via
 * SCTP_PRINTF for debugging.  Compiled only under SCTP_ASOCLOG_OF_TSNS;
 * the body is additionally gated on NOSIY_PRINTS.
 *
 * NOTE(review): "NOSIY_PRINTS" looks like a typo for "NOISY_PRINTS";
 * as written the body is compiled out unless the misspelled macro is
 * defined somewhere -- confirm before renaming the gate.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* The log is a ring: print the older (wrapped) half first. */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* Same ring-buffer ordering for the outbound log. */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
4039 
4040 #endif
4041 
/*
 * Locally abort an existing association: notify the ULP (unless the
 * socket is gone), send an ABORT chunk with optional cause op_err to
 * the peer, and free the TCB.  With a NULL stcb this only finishes
 * tearing down an inp whose socket is gone and which has no
 * associations left.  The caller's TCB lock is consumed: the stcb is
 * freed before return.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				/* last assoc is gone; finish freeing the inp */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* Only established assocs count against currestab. */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: hold a reference on the assoc while the
	 * TCB lock is dropped so the socket lock can be taken first.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4103 
/*
 * Handle an "out of the blue" packet, i.e. one that matches no known
 * association.  Scans the chunks to decide whether a response is
 * allowed: no reply to PACKET-DROPPED, ABORT or SHUTDOWN-COMPLETE; a
 * SHUTDOWN-ACK gets a SHUTDOWN-COMPLETE; everything else is answered
 * with an ABORT (optionally carrying 'cause'), subject to the
 * sctp_blackhole sysctl (1 suppresses the ABORT for packets that
 * contain an INIT, 2 suppresses it entirely).
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/*
	 * If the endpoint's socket is gone and it has no associations
	 * left, finish freeing the inp before responding.
	 */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    mflowtype, mflowid, fibnum,
			    vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/* ABORT unless the blackhole sysctl says to stay silent. */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
		    mflowtype, mflowid, fibnum,
		    vrf_id, port);
	}
}
4169 
4170 /*
4171  * check the inbound datagram to make sure there is not an abort inside it,
4172  * if there is return 1, else return 0.
4173  */
4174 int
4175 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4176 {
4177 	struct sctp_chunkhdr *ch;
4178 	struct sctp_init_chunk *init_chk, chunk_buf;
4179 	int offset;
4180 	unsigned int chk_length;
4181 
4182 	offset = iphlen + sizeof(struct sctphdr);
4183 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4184 	    (uint8_t *) & chunk_buf);
4185 	while (ch != NULL) {
4186 		chk_length = ntohs(ch->chunk_length);
4187 		if (chk_length < sizeof(*ch)) {
4188 			/* packet is probably corrupt */
4189 			break;
4190 		}
4191 		/* we seem to be ok, is it an abort? */
4192 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4193 			/* yep, tell them */
4194 			return (1);
4195 		}
4196 		if (ch->chunk_type == SCTP_INITIATION) {
4197 			/* need to update the Vtag */
4198 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4199 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4200 			if (init_chk != NULL) {
4201 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4202 			}
4203 		}
4204 		/* Nope, move to the next chunk */
4205 		offset += SCTP_SIZE32(chk_length);
4206 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4207 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4208 	}
4209 	return (0);
4210 }
4211 
4212 /*
4213  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4214  * set (i.e. it's 0) so, create this function to compare link local scopes
4215  */
4216 #ifdef INET6
4217 uint32_t
4218 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4219 {
4220 	struct sockaddr_in6 a, b;
4221 
4222 	/* save copies */
4223 	a = *addr1;
4224 	b = *addr2;
4225 
4226 	if (a.sin6_scope_id == 0)
4227 		if (sa6_recoverscope(&a)) {
4228 			/* can't get scope, so can't match */
4229 			return (0);
4230 		}
4231 	if (b.sin6_scope_id == 0)
4232 		if (sa6_recoverscope(&b)) {
4233 			/* can't get scope, so can't match */
4234 			return (0);
4235 		}
4236 	if (a.sin6_scope_id != b.sin6_scope_id)
4237 		return (0);
4238 
4239 	return (1);
4240 }
4241 
4242 /*
4243  * returns a sockaddr_in6 with embedded scope recovered and removed
4244  */
4245 struct sockaddr_in6 *
4246 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4247 {
4248 	/* check and strip embedded scope junk */
4249 	if (addr->sin6_family == AF_INET6) {
4250 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4251 			if (addr->sin6_scope_id == 0) {
4252 				*store = *addr;
4253 				if (!sa6_recoverscope(store)) {
4254 					/* use the recovered scope */
4255 					addr = store;
4256 				}
4257 			} else {
4258 				/* else, return the original "to" addr */
4259 				in6_clearscope(&addr->sin6_addr);
4260 			}
4261 		}
4262 	}
4263 	return (addr);
4264 }
4265 
4266 #endif
4267 
4268 /*
4269  * are the two addresses the same?  currently a "scopeless" check returns: 1
4270  * if same, 0 if not
4271  */
4272 int
4273 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4274 {
4275 
4276 	/* must be valid */
4277 	if (sa1 == NULL || sa2 == NULL)
4278 		return (0);
4279 
4280 	/* must be the same family */
4281 	if (sa1->sa_family != sa2->sa_family)
4282 		return (0);
4283 
4284 	switch (sa1->sa_family) {
4285 #ifdef INET6
4286 	case AF_INET6:
4287 		{
4288 			/* IPv6 addresses */
4289 			struct sockaddr_in6 *sin6_1, *sin6_2;
4290 
4291 			sin6_1 = (struct sockaddr_in6 *)sa1;
4292 			sin6_2 = (struct sockaddr_in6 *)sa2;
4293 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4294 			    sin6_2));
4295 		}
4296 #endif
4297 #ifdef INET
4298 	case AF_INET:
4299 		{
4300 			/* IPv4 addresses */
4301 			struct sockaddr_in *sin_1, *sin_2;
4302 
4303 			sin_1 = (struct sockaddr_in *)sa1;
4304 			sin_2 = (struct sockaddr_in *)sa2;
4305 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4306 		}
4307 #endif
4308 	default:
4309 		/* we don't do these... */
4310 		return (0);
4311 	}
4312 }
4313 
4314 void
4315 sctp_print_address(struct sockaddr *sa)
4316 {
4317 #ifdef INET6
4318 	char ip6buf[INET6_ADDRSTRLEN];
4319 
4320 #endif
4321 
4322 	switch (sa->sa_family) {
4323 #ifdef INET6
4324 	case AF_INET6:
4325 		{
4326 			struct sockaddr_in6 *sin6;
4327 
4328 			sin6 = (struct sockaddr_in6 *)sa;
4329 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4330 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4331 			    ntohs(sin6->sin6_port),
4332 			    sin6->sin6_scope_id);
4333 			break;
4334 		}
4335 #endif
4336 #ifdef INET
4337 	case AF_INET:
4338 		{
4339 			struct sockaddr_in *sin;
4340 			unsigned char *p;
4341 
4342 			sin = (struct sockaddr_in *)sa;
4343 			p = (unsigned char *)&sin->sin_addr;
4344 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4345 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4346 			break;
4347 		}
4348 #endif
4349 	default:
4350 		SCTP_PRINTF("?\n");
4351 		break;
4352 	}
4353 }
4354 
/*
 * Migrate all read-queue entries belonging to stcb from old_inp to
 * new_inp (used on peeloff/accept).  The move is two-phase: entries
 * are unhooked from the old inp's read queue onto a private list while
 * their mbufs are uncharged from the old socket's receive buffer, then
 * appended to the new inp's read queue with the mbufs charged to the
 * new socket.  If the old receive buffer cannot be sb-locked the data
 * is deliberately left stranded (see comment below).
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* uncharge every mbuf from the old receive buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge every mbuf to the new receive buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4430 
/*
 * Append a read-queue entry ('control') to the endpoint's read queue
 * and charge its mbuf chain to the socket receive buffer 'sb' so that
 * select()/poll() see the data; then wake up any reader.  Zero-length
 * mbufs are pruned from the chain first.  'end' marks the message as
 * complete.  The INP read lock is taken unless the caller already
 * holds it (inp_read_lock_held).  If the socket can no longer be read
 * from, the entry and its data are freed instead of being queued.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* nobody can read anymore: drop the data instead */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* walk the chain: free empty mbufs, charge the rest to sb */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* wake up any reader sleeping on the socket */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Lock-order dance: hold a ref on the
				 * assoc while the TCB lock is dropped
				 * to take the socket lock first.
				 */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4556 
4557 
/*
 * Append the mbuf chain m to an existing read-queue entry 'control'
 * (partial-delivery API or reassembly append).  Zero-length mbufs are
 * pruned; the remaining data is charged to 'sb' (when non-NULL) and
 * linked onto the entry's tail.  'end' marks the message complete and
 * clears any pending pd-api state; 'ctls_cumack' becomes the entry's
 * sinfo_tsn/sinfo_cumtsn (pd-api highest TSN, see comment below).
 * Wakes up the socket when data was added.  Returns 0 on success,
 * -1 when there is nothing to do (no control, already complete, or
 * no data).
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* prune empty mbufs, charge the rest to the receive buffer */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* wake up any reader sleeping on the socket */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * Lock-order dance: hold a ref on the assoc
			 * while the TCB lock is dropped to take the
			 * socket lock first.
			 */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4707 
4708 
4709 
4710 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4711  *************ALTERNATE ROUTING CODE
4712  */
4713 
4714 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4715  *************ALTERNATE ROUTING CODE
4716  */
4717 
4718 struct mbuf *
4719 sctp_generate_cause(uint16_t code, char *info)
4720 {
4721 	struct mbuf *m;
4722 	struct sctp_gen_error_cause *cause;
4723 	size_t info_len, len;
4724 
4725 	if ((code == 0) || (info == NULL)) {
4726 		return (NULL);
4727 	}
4728 	info_len = strlen(info);
4729 	len = sizeof(struct sctp_paramhdr) + info_len;
4730 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4731 	if (m != NULL) {
4732 		SCTP_BUF_LEN(m) = len;
4733 		cause = mtod(m, struct sctp_gen_error_cause *);
4734 		cause->code = htons(code);
4735 		cause->length = htons((uint16_t) len);
4736 		memcpy(cause->info, info, info_len);
4737 	}
4738 	return (m);
4739 }
4740 
4741 struct mbuf *
4742 sctp_generate_no_user_data_cause(uint32_t tsn)
4743 {
4744 	struct mbuf *m;
4745 	struct sctp_error_no_user_data *no_user_data_cause;
4746 	size_t len;
4747 
4748 	len = sizeof(struct sctp_error_no_user_data);
4749 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4750 	if (m != NULL) {
4751 		SCTP_BUF_LEN(m) = len;
4752 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4753 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4754 		no_user_data_cause->cause.length = htons((uint16_t) len);
4755 		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4756 	}
4757 	return (m);
4758 }
4759 
4760 #ifdef SCTP_MBCNT_LOGGING
/*
 * SCTP_MBCNT_LOGGING variant of sctp_free_bufspace: release the
 * send-buffer accounting held by chunk tp1 (chk_cnt chunks) from the
 * association's output-queue counters and, for 1-to-1 style sockets,
 * from the socket send buffer, logging the decrease when MBCNT
 * logging is enabled.  No-op if the chunk carries no data.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero rather than letting the counter underflow */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* 1-to-1 style sockets also carry the charge on so_snd */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4792 
4793 #endif
4794 
4795 int
4796 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4797     uint8_t sent, int so_locked
4798 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4799     SCTP_UNUSED
4800 #endif
4801 )
4802 {
4803 	struct sctp_stream_out *strq;
4804 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4805 	struct sctp_stream_queue_pending *sp;
4806 	uint16_t stream = 0, seq = 0;
4807 	uint8_t foundeom = 0;
4808 	int ret_sz = 0;
4809 	int notdone;
4810 	int do_wakeup_routine = 0;
4811 
4812 	stream = tp1->rec.data.stream_number;
4813 	seq = tp1->rec.data.stream_seq;
4814 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4815 		stcb->asoc.abandoned_sent[0]++;
4816 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4817 		stcb->asoc.strmout[stream].abandoned_sent[0]++;
4818 #if defined(SCTP_DETAILED_STR_STATS)
4819 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4820 #endif
4821 	} else {
4822 		stcb->asoc.abandoned_unsent[0]++;
4823 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4824 		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
4825 #if defined(SCTP_DETAILED_STR_STATS)
4826 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4827 #endif
4828 	}
4829 	do {
4830 		ret_sz += tp1->book_size;
4831 		if (tp1->data != NULL) {
4832 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4833 				sctp_flight_size_decrease(tp1);
4834 				sctp_total_flight_decrease(stcb, tp1);
4835 			}
4836 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4837 			stcb->asoc.peers_rwnd += tp1->send_size;
4838 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4839 			if (sent) {
4840 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4841 			} else {
4842 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4843 			}
4844 			if (tp1->data) {
4845 				sctp_m_freem(tp1->data);
4846 				tp1->data = NULL;
4847 			}
4848 			do_wakeup_routine = 1;
4849 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4850 				stcb->asoc.sent_queue_cnt_removeable--;
4851 			}
4852 		}
4853 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4854 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4855 		    SCTP_DATA_NOT_FRAG) {
4856 			/* not frag'ed we ae done   */
4857 			notdone = 0;
4858 			foundeom = 1;
4859 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4860 			/* end of frag, we are done */
4861 			notdone = 0;
4862 			foundeom = 1;
4863 		} else {
4864 			/*
4865 			 * Its a begin or middle piece, we must mark all of
4866 			 * it
4867 			 */
4868 			notdone = 1;
4869 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4870 		}
4871 	} while (tp1 && notdone);
4872 	if (foundeom == 0) {
4873 		/*
4874 		 * The multi-part message was scattered across the send and
4875 		 * sent queue.
4876 		 */
4877 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4878 			if ((tp1->rec.data.stream_number != stream) ||
4879 			    (tp1->rec.data.stream_seq != seq)) {
4880 				break;
4881 			}
4882 			/*
4883 			 * save to chk in case we have some on stream out
4884 			 * queue. If so and we have an un-transmitted one we
4885 			 * don't have to fudge the TSN.
4886 			 */
4887 			chk = tp1;
4888 			ret_sz += tp1->book_size;
4889 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4890 			if (sent) {
4891 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4892 			} else {
4893 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4894 			}
4895 			if (tp1->data) {
4896 				sctp_m_freem(tp1->data);
4897 				tp1->data = NULL;
4898 			}
4899 			/* No flight involved here book the size to 0 */
4900 			tp1->book_size = 0;
4901 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4902 				foundeom = 1;
4903 			}
4904 			do_wakeup_routine = 1;
4905 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4906 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4907 			/*
4908 			 * on to the sent queue so we can wait for it to be
4909 			 * passed by.
4910 			 */
4911 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4912 			    sctp_next);
4913 			stcb->asoc.send_queue_cnt--;
4914 			stcb->asoc.sent_queue_cnt++;
4915 		}
4916 	}
4917 	if (foundeom == 0) {
4918 		/*
4919 		 * Still no eom found. That means there is stuff left on the
4920 		 * stream out queue.. yuck.
4921 		 */
4922 		SCTP_TCB_SEND_LOCK(stcb);
4923 		strq = &stcb->asoc.strmout[stream];
4924 		sp = TAILQ_FIRST(&strq->outqueue);
4925 		if (sp != NULL) {
4926 			sp->discard_rest = 1;
4927 			/*
4928 			 * We may need to put a chunk on the queue that
4929 			 * holds the TSN that would have been sent with the
4930 			 * LAST bit.
4931 			 */
4932 			if (chk == NULL) {
4933 				/* Yep, we have to */
4934 				sctp_alloc_a_chunk(stcb, chk);
4935 				if (chk == NULL) {
4936 					/*
4937 					 * we are hosed. All we can do is
4938 					 * nothing.. which will cause an
4939 					 * abort if the peer is paying
4940 					 * attention.
4941 					 */
4942 					goto oh_well;
4943 				}
4944 				memset(chk, 0, sizeof(*chk));
4945 				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4946 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4947 				chk->asoc = &stcb->asoc;
4948 				chk->rec.data.stream_seq = strq->next_sequence_send;
4949 				chk->rec.data.stream_number = sp->stream;
4950 				chk->rec.data.payloadtype = sp->ppid;
4951 				chk->rec.data.context = sp->context;
4952 				chk->flags = sp->act_flags;
4953 				chk->whoTo = NULL;
4954 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4955 				strq->chunks_on_queues++;
4956 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4957 				stcb->asoc.sent_queue_cnt++;
4958 				stcb->asoc.pr_sctp_cnt++;
4959 			} else {
4960 				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4961 			}
4962 			strq->next_sequence_send++;
4963 	oh_well:
4964 			if (sp->data) {
4965 				/*
4966 				 * Pull any data to free up the SB and allow
4967 				 * sender to "add more" while we will throw
4968 				 * away :-)
4969 				 */
4970 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4971 				ret_sz += sp->length;
4972 				do_wakeup_routine = 1;
4973 				sp->some_taken = 1;
4974 				sctp_m_freem(sp->data);
4975 				sp->data = NULL;
4976 				sp->tail_mbuf = NULL;
4977 				sp->length = 0;
4978 			}
4979 		}
4980 		SCTP_TCB_SEND_UNLOCK(stcb);
4981 	}
4982 	if (do_wakeup_routine) {
4983 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4984 		struct socket *so;
4985 
4986 		so = SCTP_INP_SO(stcb->sctp_ep);
4987 		if (!so_locked) {
4988 			atomic_add_int(&stcb->asoc.refcnt, 1);
4989 			SCTP_TCB_UNLOCK(stcb);
4990 			SCTP_SOCKET_LOCK(so, 1);
4991 			SCTP_TCB_LOCK(stcb);
4992 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4993 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4994 				/* assoc was freed while we were unlocked */
4995 				SCTP_SOCKET_UNLOCK(so, 1);
4996 				return (ret_sz);
4997 			}
4998 		}
4999 #endif
5000 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
5001 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5002 		if (!so_locked) {
5003 			SCTP_SOCKET_UNLOCK(so, 1);
5004 		}
5005 #endif
5006 	}
5007 	return (ret_sz);
5008 }
5009 
5010 /*
5011  * checks to see if the given address, sa, is one that is currently known by
5012  * the kernel note: can't distinguish the same address on multiple interfaces
5013  * and doesn't handle multiple addresses with different zone/scope id's note:
5014  * ifa_ifwithaddr() compares the entire sockaddr struct
5015  */
5016 struct sctp_ifa *
5017 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5018     int holds_lock)
5019 {
5020 	struct sctp_laddr *laddr;
5021 
5022 	if (holds_lock == 0) {
5023 		SCTP_INP_RLOCK(inp);
5024 	}
5025 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5026 		if (laddr->ifa == NULL)
5027 			continue;
5028 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5029 			continue;
5030 #ifdef INET
5031 		if (addr->sa_family == AF_INET) {
5032 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5033 			    laddr->ifa->address.sin.sin_addr.s_addr) {
5034 				/* found him. */
5035 				if (holds_lock == 0) {
5036 					SCTP_INP_RUNLOCK(inp);
5037 				}
5038 				return (laddr->ifa);
5039 				break;
5040 			}
5041 		}
5042 #endif
5043 #ifdef INET6
5044 		if (addr->sa_family == AF_INET6) {
5045 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5046 			    &laddr->ifa->address.sin6)) {
5047 				/* found him. */
5048 				if (holds_lock == 0) {
5049 					SCTP_INP_RUNLOCK(inp);
5050 				}
5051 				return (laddr->ifa);
5052 				break;
5053 			}
5054 		}
5055 #endif
5056 	}
5057 	if (holds_lock == 0) {
5058 		SCTP_INP_RUNLOCK(inp);
5059 	}
5060 	return (NULL);
5061 }
5062 
/*
 * Compute a 32-bit hash value for the given address, used to pick a
 * bucket in the VRF address hash table.  Unknown address families
 * hash to zero.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			uint32_t v4addr;

			/* Fold the high half of the v4 address into the low half. */
			v4addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
			return (v4addr ^ (v4addr >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *a6;
			uint32_t sum;

			/* Sum the four 32-bit words of the v6 address, then fold. */
			a6 = (struct sockaddr_in6 *)addr;
			sum = a6->sin6_addr.s6_addr32[0] +
			    a6->sin6_addr.s6_addr32[1] +
			    a6->sin6_addr.s6_addr32[2] +
			    a6->sin6_addr.s6_addr32[3];
			return (sum ^ (sum >> 16));
		}
#endif
	default:
		return (0);
	}
}
5096 
5097 struct sctp_ifa *
5098 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5099 {
5100 	struct sctp_ifa *sctp_ifap;
5101 	struct sctp_vrf *vrf;
5102 	struct sctp_ifalist *hash_head;
5103 	uint32_t hash_of_addr;
5104 
5105 	if (holds_lock == 0)
5106 		SCTP_IPI_ADDR_RLOCK();
5107 
5108 	vrf = sctp_find_vrf(vrf_id);
5109 	if (vrf == NULL) {
5110 		if (holds_lock == 0)
5111 			SCTP_IPI_ADDR_RUNLOCK();
5112 		return (NULL);
5113 	}
5114 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5115 
5116 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5117 	if (hash_head == NULL) {
5118 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5119 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5120 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5121 		sctp_print_address(addr);
5122 		SCTP_PRINTF("No such bucket for address\n");
5123 		if (holds_lock == 0)
5124 			SCTP_IPI_ADDR_RUNLOCK();
5125 
5126 		return (NULL);
5127 	}
5128 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5129 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5130 			continue;
5131 #ifdef INET
5132 		if (addr->sa_family == AF_INET) {
5133 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5134 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5135 				/* found him. */
5136 				if (holds_lock == 0)
5137 					SCTP_IPI_ADDR_RUNLOCK();
5138 				return (sctp_ifap);
5139 				break;
5140 			}
5141 		}
5142 #endif
5143 #ifdef INET6
5144 		if (addr->sa_family == AF_INET6) {
5145 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5146 			    &sctp_ifap->address.sin6)) {
5147 				/* found him. */
5148 				if (holds_lock == 0)
5149 					SCTP_IPI_ADDR_RUNLOCK();
5150 				return (sctp_ifap);
5151 				break;
5152 			}
5153 		}
5154 #endif
5155 	}
5156 	if (holds_lock == 0)
5157 		SCTP_IPI_ADDR_RUNLOCK();
5158 	return (NULL);
5159 }
5160 
/*
 * Called after the application has pulled data off the read queue.
 * Decides whether the receive window has opened by at least rwnd_req
 * bytes since the last report; if so, sends a window-update SACK and
 * kicks chunk output, otherwise just records the pending amount on the
 * TCB for next time.
 *
 * freed_so_far is in/out: the caller's count of bytes freed since the
 * last call; it is folded into stcb->freed_by_sorcv_sincelast and reset
 * to zero.  hold_rlock is non-zero when the caller holds the INP
 * read-queue lock, which is dropped around the SACK work and re-taken
 * before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the assoc can't be freed out from under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Accumulate the caller's freed bytes onto the per-TCB counter. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		if (hold_rlock) {
			/* Drop the read-queue lock before the SACK/output work. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock: the assoc may have begun freeing. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-acquire the read-queue lock if we dropped it for the caller. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5243 
5244 int
5245 sctp_sorecvmsg(struct socket *so,
5246     struct uio *uio,
5247     struct mbuf **mp,
5248     struct sockaddr *from,
5249     int fromlen,
5250     int *msg_flags,
5251     struct sctp_sndrcvinfo *sinfo,
5252     int filling_sinfo)
5253 {
5254 	/*
5255 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5256 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5257 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5258 	 * On the way out we may send out any combination of:
5259 	 * MSG_NOTIFICATION MSG_EOR
5260 	 *
5261 	 */
5262 	struct sctp_inpcb *inp = NULL;
5263 	int my_len = 0;
5264 	int cp_len = 0, error = 0;
5265 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5266 	struct mbuf *m = NULL;
5267 	struct sctp_tcb *stcb = NULL;
5268 	int wakeup_read_socket = 0;
5269 	int freecnt_applied = 0;
5270 	int out_flags = 0, in_flags = 0;
5271 	int block_allowed = 1;
5272 	uint32_t freed_so_far = 0;
5273 	uint32_t copied_so_far = 0;
5274 	int in_eeor_mode = 0;
5275 	int no_rcv_needed = 0;
5276 	uint32_t rwnd_req = 0;
5277 	int hold_sblock = 0;
5278 	int hold_rlock = 0;
5279 	int slen = 0;
5280 	uint32_t held_length = 0;
5281 	int sockbuf_lock = 0;
5282 
5283 	if (uio == NULL) {
5284 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5285 		return (EINVAL);
5286 	}
5287 	if (msg_flags) {
5288 		in_flags = *msg_flags;
5289 		if (in_flags & MSG_PEEK)
5290 			SCTP_STAT_INCR(sctps_read_peeks);
5291 	} else {
5292 		in_flags = 0;
5293 	}
5294 	slen = uio->uio_resid;
5295 
5296 	/* Pull in and set up our int flags */
5297 	if (in_flags & MSG_OOB) {
5298 		/* Out of band's NOT supported */
5299 		return (EOPNOTSUPP);
5300 	}
5301 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5302 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5303 		return (EINVAL);
5304 	}
5305 	if ((in_flags & (MSG_DONTWAIT
5306 	    | MSG_NBIO
5307 	    )) ||
5308 	    SCTP_SO_IS_NBIO(so)) {
5309 		block_allowed = 0;
5310 	}
5311 	/* setup the endpoint */
5312 	inp = (struct sctp_inpcb *)so->so_pcb;
5313 	if (inp == NULL) {
5314 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5315 		return (EFAULT);
5316 	}
5317 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5318 	/* Must be at least a MTU's worth */
5319 	if (rwnd_req < SCTP_MIN_RWND)
5320 		rwnd_req = SCTP_MIN_RWND;
5321 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5322 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5323 		sctp_misc_ints(SCTP_SORECV_ENTER,
5324 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5325 	}
5326 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5327 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5328 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5329 	}
5330 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5331 	if (error) {
5332 		goto release_unlocked;
5333 	}
5334 	sockbuf_lock = 1;
5335 restart:
5336 
5337 
5338 restart_nosblocks:
5339 	if (hold_sblock == 0) {
5340 		SOCKBUF_LOCK(&so->so_rcv);
5341 		hold_sblock = 1;
5342 	}
5343 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5344 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5345 		goto out;
5346 	}
5347 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5348 		if (so->so_error) {
5349 			error = so->so_error;
5350 			if ((in_flags & MSG_PEEK) == 0)
5351 				so->so_error = 0;
5352 			goto out;
5353 		} else {
5354 			if (so->so_rcv.sb_cc == 0) {
5355 				/* indicate EOF */
5356 				error = 0;
5357 				goto out;
5358 			}
5359 		}
5360 	}
5361 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5362 		/* we need to wait for data */
5363 		if ((so->so_rcv.sb_cc == 0) &&
5364 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5365 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5366 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5367 				/*
5368 				 * For active open side clear flags for
5369 				 * re-use passive open is blocked by
5370 				 * connect.
5371 				 */
5372 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5373 					/*
5374 					 * You were aborted, passive side
5375 					 * always hits here
5376 					 */
5377 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5378 					error = ECONNRESET;
5379 				}
5380 				so->so_state &= ~(SS_ISCONNECTING |
5381 				    SS_ISDISCONNECTING |
5382 				    SS_ISCONFIRMING |
5383 				    SS_ISCONNECTED);
5384 				if (error == 0) {
5385 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5386 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5387 						error = ENOTCONN;
5388 					}
5389 				}
5390 				goto out;
5391 			}
5392 		}
5393 		error = sbwait(&so->so_rcv);
5394 		if (error) {
5395 			goto out;
5396 		}
5397 		held_length = 0;
5398 		goto restart_nosblocks;
5399 	} else if (so->so_rcv.sb_cc == 0) {
5400 		if (so->so_error) {
5401 			error = so->so_error;
5402 			if ((in_flags & MSG_PEEK) == 0)
5403 				so->so_error = 0;
5404 		} else {
5405 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5406 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5407 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5408 					/*
5409 					 * For active open side clear flags
5410 					 * for re-use passive open is
5411 					 * blocked by connect.
5412 					 */
5413 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5414 						/*
5415 						 * You were aborted, passive
5416 						 * side always hits here
5417 						 */
5418 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5419 						error = ECONNRESET;
5420 					}
5421 					so->so_state &= ~(SS_ISCONNECTING |
5422 					    SS_ISDISCONNECTING |
5423 					    SS_ISCONFIRMING |
5424 					    SS_ISCONNECTED);
5425 					if (error == 0) {
5426 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5427 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5428 							error = ENOTCONN;
5429 						}
5430 					}
5431 					goto out;
5432 				}
5433 			}
5434 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5435 			error = EWOULDBLOCK;
5436 		}
5437 		goto out;
5438 	}
5439 	if (hold_sblock == 1) {
5440 		SOCKBUF_UNLOCK(&so->so_rcv);
5441 		hold_sblock = 0;
5442 	}
5443 	/* we possibly have data we can read */
5444 	/* sa_ignore FREED_MEMORY */
5445 	control = TAILQ_FIRST(&inp->read_queue);
5446 	if (control == NULL) {
5447 		/*
5448 		 * This could be happening since the appender did the
5449 		 * increment but as not yet did the tailq insert onto the
5450 		 * read_queue
5451 		 */
5452 		if (hold_rlock == 0) {
5453 			SCTP_INP_READ_LOCK(inp);
5454 		}
5455 		control = TAILQ_FIRST(&inp->read_queue);
5456 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5457 #ifdef INVARIANTS
5458 			panic("Huh, its non zero and nothing on control?");
5459 #endif
5460 			so->so_rcv.sb_cc = 0;
5461 		}
5462 		SCTP_INP_READ_UNLOCK(inp);
5463 		hold_rlock = 0;
5464 		goto restart;
5465 	}
5466 	if ((control->length == 0) &&
5467 	    (control->do_not_ref_stcb)) {
5468 		/*
5469 		 * Clean up code for freeing assoc that left behind a
5470 		 * pdapi.. maybe a peer in EEOR that just closed after
5471 		 * sending and never indicated a EOR.
5472 		 */
5473 		if (hold_rlock == 0) {
5474 			hold_rlock = 1;
5475 			SCTP_INP_READ_LOCK(inp);
5476 		}
5477 		control->held_length = 0;
5478 		if (control->data) {
5479 			/* Hmm there is data here .. fix */
5480 			struct mbuf *m_tmp;
5481 			int cnt = 0;
5482 
5483 			m_tmp = control->data;
5484 			while (m_tmp) {
5485 				cnt += SCTP_BUF_LEN(m_tmp);
5486 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5487 					control->tail_mbuf = m_tmp;
5488 					control->end_added = 1;
5489 				}
5490 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5491 			}
5492 			control->length = cnt;
5493 		} else {
5494 			/* remove it */
5495 			TAILQ_REMOVE(&inp->read_queue, control, next);
5496 			/* Add back any hiddend data */
5497 			sctp_free_remote_addr(control->whoFrom);
5498 			sctp_free_a_readq(stcb, control);
5499 		}
5500 		if (hold_rlock) {
5501 			hold_rlock = 0;
5502 			SCTP_INP_READ_UNLOCK(inp);
5503 		}
5504 		goto restart;
5505 	}
5506 	if ((control->length == 0) &&
5507 	    (control->end_added == 1)) {
5508 		/*
5509 		 * Do we also need to check for (control->pdapi_aborted ==
5510 		 * 1)?
5511 		 */
5512 		if (hold_rlock == 0) {
5513 			hold_rlock = 1;
5514 			SCTP_INP_READ_LOCK(inp);
5515 		}
5516 		TAILQ_REMOVE(&inp->read_queue, control, next);
5517 		if (control->data) {
5518 #ifdef INVARIANTS
5519 			panic("control->data not null but control->length == 0");
5520 #else
5521 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5522 			sctp_m_freem(control->data);
5523 			control->data = NULL;
5524 #endif
5525 		}
5526 		if (control->aux_data) {
5527 			sctp_m_free(control->aux_data);
5528 			control->aux_data = NULL;
5529 		}
5530 		sctp_free_remote_addr(control->whoFrom);
5531 		sctp_free_a_readq(stcb, control);
5532 		if (hold_rlock) {
5533 			hold_rlock = 0;
5534 			SCTP_INP_READ_UNLOCK(inp);
5535 		}
5536 		goto restart;
5537 	}
5538 	if (control->length == 0) {
5539 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5540 		    (filling_sinfo)) {
5541 			/* find a more suitable one then this */
5542 			ctl = TAILQ_NEXT(control, next);
5543 			while (ctl) {
5544 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5545 				    (ctl->some_taken ||
5546 				    (ctl->spec_flags & M_NOTIFICATION) ||
5547 				    ((ctl->do_not_ref_stcb == 0) &&
5548 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5549 				    ) {
5550 					/*-
5551 					 * If we have a different TCB next, and there is data
5552 					 * present. If we have already taken some (pdapi), OR we can
5553 					 * ref the tcb and no delivery as started on this stream, we
5554 					 * take it. Note we allow a notification on a different
5555 					 * assoc to be delivered..
5556 					 */
5557 					control = ctl;
5558 					goto found_one;
5559 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5560 					    (ctl->length) &&
5561 					    ((ctl->some_taken) ||
5562 					    ((ctl->do_not_ref_stcb == 0) &&
5563 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5564 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5565 					/*-
5566 					 * If we have the same tcb, and there is data present, and we
5567 					 * have the strm interleave feature present. Then if we have
5568 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5569 					 * not started a delivery for this stream, we can take it.
5570 					 * Note we do NOT allow a notificaiton on the same assoc to
5571 					 * be delivered.
5572 					 */
5573 					control = ctl;
5574 					goto found_one;
5575 				}
5576 				ctl = TAILQ_NEXT(ctl, next);
5577 			}
5578 		}
5579 		/*
5580 		 * if we reach here, not suitable replacement is available
5581 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5582 		 * into the our held count, and its time to sleep again.
5583 		 */
5584 		held_length = so->so_rcv.sb_cc;
5585 		control->held_length = so->so_rcv.sb_cc;
5586 		goto restart;
5587 	}
5588 	/* Clear the held length since there is something to read */
5589 	control->held_length = 0;
5590 	if (hold_rlock) {
5591 		SCTP_INP_READ_UNLOCK(inp);
5592 		hold_rlock = 0;
5593 	}
5594 found_one:
5595 	/*
5596 	 * If we reach here, control has a some data for us to read off.
5597 	 * Note that stcb COULD be NULL.
5598 	 */
5599 	control->some_taken++;
5600 	if (hold_sblock) {
5601 		SOCKBUF_UNLOCK(&so->so_rcv);
5602 		hold_sblock = 0;
5603 	}
5604 	stcb = control->stcb;
5605 	if (stcb) {
5606 		if ((control->do_not_ref_stcb == 0) &&
5607 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5608 			if (freecnt_applied == 0)
5609 				stcb = NULL;
5610 		} else if (control->do_not_ref_stcb == 0) {
5611 			/* you can't free it on me please */
5612 			/*
5613 			 * The lock on the socket buffer protects us so the
5614 			 * free code will stop. But since we used the
5615 			 * socketbuf lock and the sender uses the tcb_lock
5616 			 * to increment, we need to use the atomic add to
5617 			 * the refcnt
5618 			 */
5619 			if (freecnt_applied) {
5620 #ifdef INVARIANTS
5621 				panic("refcnt already incremented");
5622 #else
5623 				SCTP_PRINTF("refcnt already incremented?\n");
5624 #endif
5625 			} else {
5626 				atomic_add_int(&stcb->asoc.refcnt, 1);
5627 				freecnt_applied = 1;
5628 			}
5629 			/*
5630 			 * Setup to remember how much we have not yet told
5631 			 * the peer our rwnd has opened up. Note we grab the
5632 			 * value from the tcb from last time. Note too that
5633 			 * sack sending clears this when a sack is sent,
5634 			 * which is fine. Once we hit the rwnd_req, we then
5635 			 * will go to the sctp_user_rcvd() that will not
5636 			 * lock until it KNOWs it MUST send a WUP-SACK.
5637 			 */
5638 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5639 			stcb->freed_by_sorcv_sincelast = 0;
5640 		}
5641 	}
5642 	if (stcb &&
5643 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5644 	    control->do_not_ref_stcb == 0) {
5645 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5646 	}
5647 	/* First lets get off the sinfo and sockaddr info */
5648 	if ((sinfo) && filling_sinfo) {
5649 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5650 		nxt = TAILQ_NEXT(control, next);
5651 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5652 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5653 			struct sctp_extrcvinfo *s_extra;
5654 
5655 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5656 			if ((nxt) &&
5657 			    (nxt->length)) {
5658 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5659 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5660 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5661 				}
5662 				if (nxt->spec_flags & M_NOTIFICATION) {
5663 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5664 				}
5665 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5666 				s_extra->serinfo_next_length = nxt->length;
5667 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5668 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5669 				if (nxt->tail_mbuf != NULL) {
5670 					if (nxt->end_added) {
5671 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5672 					}
5673 				}
5674 			} else {
5675 				/*
5676 				 * we explicitly 0 this, since the memcpy
5677 				 * got some other things beyond the older
5678 				 * sinfo_ that is on the control's structure
5679 				 * :-D
5680 				 */
5681 				nxt = NULL;
5682 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5683 				s_extra->serinfo_next_aid = 0;
5684 				s_extra->serinfo_next_length = 0;
5685 				s_extra->serinfo_next_ppid = 0;
5686 				s_extra->serinfo_next_stream = 0;
5687 			}
5688 		}
5689 		/*
5690 		 * update off the real current cum-ack, if we have an stcb.
5691 		 */
5692 		if ((control->do_not_ref_stcb == 0) && stcb)
5693 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5694 		/*
5695 		 * mask off the high bits, we keep the actual chunk bits in
5696 		 * there.
5697 		 */
5698 		sinfo->sinfo_flags &= 0x00ff;
5699 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5700 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5701 		}
5702 	}
5703 #ifdef SCTP_ASOCLOG_OF_TSNS
5704 	{
5705 		int index, newindex;
5706 		struct sctp_pcbtsn_rlog *entry;
5707 
5708 		do {
5709 			index = inp->readlog_index;
5710 			newindex = index + 1;
5711 			if (newindex >= SCTP_READ_LOG_SIZE) {
5712 				newindex = 0;
5713 			}
5714 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5715 		entry = &inp->readlog[index];
5716 		entry->vtag = control->sinfo_assoc_id;
5717 		entry->strm = control->sinfo_stream;
5718 		entry->seq = control->sinfo_ssn;
5719 		entry->sz = control->length;
5720 		entry->flgs = control->sinfo_flags;
5721 	}
5722 #endif
5723 	if ((fromlen > 0) && (from != NULL)) {
5724 		union sctp_sockstore store;
5725 		size_t len;
5726 
5727 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5728 #ifdef INET6
5729 		case AF_INET6:
5730 			len = sizeof(struct sockaddr_in6);
5731 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5732 			store.sin6.sin6_port = control->port_from;
5733 			break;
5734 #endif
5735 #ifdef INET
5736 		case AF_INET:
5737 #ifdef INET6
5738 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5739 				len = sizeof(struct sockaddr_in6);
5740 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5741 				    &store.sin6);
5742 				store.sin6.sin6_port = control->port_from;
5743 			} else {
5744 				len = sizeof(struct sockaddr_in);
5745 				store.sin = control->whoFrom->ro._l_addr.sin;
5746 				store.sin.sin_port = control->port_from;
5747 			}
5748 #else
5749 			len = sizeof(struct sockaddr_in);
5750 			store.sin = control->whoFrom->ro._l_addr.sin;
5751 			store.sin.sin_port = control->port_from;
5752 #endif
5753 			break;
5754 #endif
5755 		default:
5756 			len = 0;
5757 			break;
5758 		}
5759 		memcpy(from, &store, min((size_t)fromlen, len));
5760 #ifdef INET6
5761 		{
5762 			struct sockaddr_in6 lsa6, *from6;
5763 
5764 			from6 = (struct sockaddr_in6 *)from;
5765 			sctp_recover_scope_mac(from6, (&lsa6));
5766 		}
5767 #endif
5768 	}
5769 	/* now copy out what data we can */
5770 	if (mp == NULL) {
5771 		/* copy out each mbuf in the chain up to length */
5772 get_more_data:
5773 		m = control->data;
5774 		while (m) {
5775 			/* Move out all we can */
5776 			cp_len = (int)uio->uio_resid;
5777 			my_len = (int)SCTP_BUF_LEN(m);
5778 			if (cp_len > my_len) {
5779 				/* not enough in this buf */
5780 				cp_len = my_len;
5781 			}
5782 			if (hold_rlock) {
5783 				SCTP_INP_READ_UNLOCK(inp);
5784 				hold_rlock = 0;
5785 			}
5786 			if (cp_len > 0)
5787 				error = uiomove(mtod(m, char *), cp_len, uio);
5788 			/* re-read */
5789 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5790 				goto release;
5791 			}
5792 			if ((control->do_not_ref_stcb == 0) && stcb &&
5793 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5794 				no_rcv_needed = 1;
5795 			}
5796 			if (error) {
5797 				/* error we are out of here */
5798 				goto release;
5799 			}
5800 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5801 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5802 			    ((control->end_added == 0) ||
5803 			    (control->end_added &&
5804 			    (TAILQ_NEXT(control, next) == NULL)))
5805 			    ) {
5806 				SCTP_INP_READ_LOCK(inp);
5807 				hold_rlock = 1;
5808 			}
5809 			if (cp_len == SCTP_BUF_LEN(m)) {
5810 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5811 				    (control->end_added)) {
5812 					out_flags |= MSG_EOR;
5813 					if ((control->do_not_ref_stcb == 0) &&
5814 					    (control->stcb != NULL) &&
5815 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5816 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5817 				}
5818 				if (control->spec_flags & M_NOTIFICATION) {
5819 					out_flags |= MSG_NOTIFICATION;
5820 				}
5821 				/* we ate up the mbuf */
5822 				if (in_flags & MSG_PEEK) {
5823 					/* just looking */
5824 					m = SCTP_BUF_NEXT(m);
5825 					copied_so_far += cp_len;
5826 				} else {
5827 					/* dispose of the mbuf */
5828 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5829 						sctp_sblog(&so->so_rcv,
5830 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5831 					}
5832 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5833 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5834 						sctp_sblog(&so->so_rcv,
5835 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5836 					}
5837 					copied_so_far += cp_len;
5838 					freed_so_far += cp_len;
5839 					freed_so_far += MSIZE;
5840 					atomic_subtract_int(&control->length, cp_len);
5841 					control->data = sctp_m_free(m);
5842 					m = control->data;
5843 					/*
5844 					 * been through it all, must hold sb
5845 					 * lock ok to null tail
5846 					 */
5847 					if (control->data == NULL) {
5848 #ifdef INVARIANTS
5849 						if ((control->end_added == 0) ||
5850 						    (TAILQ_NEXT(control, next) == NULL)) {
5851 							/*
5852 							 * If the end is not
5853 							 * added, OR the
5854 							 * next is NOT null
5855 							 * we MUST have the
5856 							 * lock.
5857 							 */
5858 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5859 								panic("Hmm we don't own the lock?");
5860 							}
5861 						}
5862 #endif
5863 						control->tail_mbuf = NULL;
5864 #ifdef INVARIANTS
5865 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5866 							panic("end_added, nothing left and no MSG_EOR");
5867 						}
5868 #endif
5869 					}
5870 				}
5871 			} else {
5872 				/* Do we need to trim the mbuf? */
5873 				if (control->spec_flags & M_NOTIFICATION) {
5874 					out_flags |= MSG_NOTIFICATION;
5875 				}
5876 				if ((in_flags & MSG_PEEK) == 0) {
5877 					SCTP_BUF_RESV_UF(m, cp_len);
5878 					SCTP_BUF_LEN(m) -= cp_len;
5879 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5880 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5881 					}
5882 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5883 					if ((control->do_not_ref_stcb == 0) &&
5884 					    stcb) {
5885 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5886 					}
5887 					copied_so_far += cp_len;
5888 					freed_so_far += cp_len;
5889 					freed_so_far += MSIZE;
5890 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5891 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5892 						    SCTP_LOG_SBRESULT, 0);
5893 					}
5894 					atomic_subtract_int(&control->length, cp_len);
5895 				} else {
5896 					copied_so_far += cp_len;
5897 				}
5898 			}
5899 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5900 				break;
5901 			}
5902 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5903 			    (control->do_not_ref_stcb == 0) &&
5904 			    (freed_so_far >= rwnd_req)) {
5905 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5906 			}
5907 		}		/* end while(m) */
5908 		/*
5909 		 * At this point we have looked at it all and we either have
5910 		 * a MSG_EOR/or read all the user wants... <OR>
5911 		 * control->length == 0.
5912 		 */
5913 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5914 			/* we are done with this control */
5915 			if (control->length == 0) {
5916 				if (control->data) {
5917 #ifdef INVARIANTS
5918 					panic("control->data not null at read eor?");
5919 #else
5920 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5921 					sctp_m_freem(control->data);
5922 					control->data = NULL;
5923 #endif
5924 				}
5925 		done_with_control:
5926 				if (TAILQ_NEXT(control, next) == NULL) {
5927 					/*
5928 					 * If we don't have a next we need a
5929 					 * lock, if there is a next
5930 					 * interrupt is filling ahead of us
5931 					 * and we don't need a lock to
5932 					 * remove this guy (which is the
5933 					 * head of the queue).
5934 					 */
5935 					if (hold_rlock == 0) {
5936 						SCTP_INP_READ_LOCK(inp);
5937 						hold_rlock = 1;
5938 					}
5939 				}
5940 				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hidden data */
5942 				if (control->held_length) {
5943 					held_length = 0;
5944 					control->held_length = 0;
5945 					wakeup_read_socket = 1;
5946 				}
5947 				if (control->aux_data) {
5948 					sctp_m_free(control->aux_data);
5949 					control->aux_data = NULL;
5950 				}
5951 				no_rcv_needed = control->do_not_ref_stcb;
5952 				sctp_free_remote_addr(control->whoFrom);
5953 				control->data = NULL;
5954 				sctp_free_a_readq(stcb, control);
5955 				control = NULL;
5956 				if ((freed_so_far >= rwnd_req) &&
5957 				    (no_rcv_needed == 0))
5958 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5959 
5960 			} else {
5961 				/*
5962 				 * The user did not read all of this
5963 				 * message, turn off the returned MSG_EOR
5964 				 * since we are leaving more behind on the
5965 				 * control to read.
5966 				 */
5967 #ifdef INVARIANTS
5968 				if (control->end_added &&
5969 				    (control->data == NULL) &&
5970 				    (control->tail_mbuf == NULL)) {
5971 					panic("Gak, control->length is corrupt?");
5972 				}
5973 #endif
5974 				no_rcv_needed = control->do_not_ref_stcb;
5975 				out_flags &= ~MSG_EOR;
5976 			}
5977 		}
5978 		if (out_flags & MSG_EOR) {
5979 			goto release;
5980 		}
5981 		if ((uio->uio_resid == 0) ||
5982 		    ((in_eeor_mode) &&
5983 		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5984 			goto release;
5985 		}
5986 		/*
5987 		 * If I hit here the receiver wants more and this message is
5988 		 * NOT done (pd-api). So two questions. Can we block? if not
5989 		 * we are done. Did the user NOT set MSG_WAITALL?
5990 		 */
5991 		if (block_allowed == 0) {
5992 			goto release;
5993 		}
5994 		/*
5995 		 * We need to wait for more data a few things: - We don't
5996 		 * sbunlock() so we don't get someone else reading. - We
5997 		 * must be sure to account for the case where what is added
5998 		 * is NOT to our control when we wakeup.
5999 		 */
6000 
6001 		/*
6002 		 * Do we need to tell the transport a rwnd update might be
6003 		 * needed before we go to sleep?
6004 		 */
6005 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6006 		    ((freed_so_far >= rwnd_req) &&
6007 		    (control->do_not_ref_stcb == 0) &&
6008 		    (no_rcv_needed == 0))) {
6009 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6010 		}
6011 wait_some_more:
6012 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6013 			goto release;
6014 		}
6015 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6016 			goto release;
6017 
6018 		if (hold_rlock == 1) {
6019 			SCTP_INP_READ_UNLOCK(inp);
6020 			hold_rlock = 0;
6021 		}
6022 		if (hold_sblock == 0) {
6023 			SOCKBUF_LOCK(&so->so_rcv);
6024 			hold_sblock = 1;
6025 		}
6026 		if ((copied_so_far) && (control->length == 0) &&
6027 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6028 			goto release;
6029 		}
6030 		if (so->so_rcv.sb_cc <= control->held_length) {
6031 			error = sbwait(&so->so_rcv);
6032 			if (error) {
6033 				goto release;
6034 			}
6035 			control->held_length = 0;
6036 		}
6037 		if (hold_sblock) {
6038 			SOCKBUF_UNLOCK(&so->so_rcv);
6039 			hold_sblock = 0;
6040 		}
6041 		if (control->length == 0) {
6042 			/* still nothing here */
6043 			if (control->end_added == 1) {
6044 				/* he aborted, or is done i.e.did a shutdown */
6045 				out_flags |= MSG_EOR;
6046 				if (control->pdapi_aborted) {
6047 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6048 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6049 
6050 					out_flags |= MSG_TRUNC;
6051 				} else {
6052 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6053 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6054 				}
6055 				goto done_with_control;
6056 			}
6057 			if (so->so_rcv.sb_cc > held_length) {
6058 				control->held_length = so->so_rcv.sb_cc;
6059 				held_length = 0;
6060 			}
6061 			goto wait_some_more;
6062 		} else if (control->data == NULL) {
6063 			/*
6064 			 * we must re-sync since data is probably being
6065 			 * added
6066 			 */
6067 			SCTP_INP_READ_LOCK(inp);
6068 			if ((control->length > 0) && (control->data == NULL)) {
6069 				/*
6070 				 * big trouble.. we have the lock and its
6071 				 * corrupt?
6072 				 */
6073 #ifdef INVARIANTS
6074 				panic("Impossible data==NULL length !=0");
6075 #endif
6076 				out_flags |= MSG_EOR;
6077 				out_flags |= MSG_TRUNC;
6078 				control->length = 0;
6079 				SCTP_INP_READ_UNLOCK(inp);
6080 				goto done_with_control;
6081 			}
6082 			SCTP_INP_READ_UNLOCK(inp);
6083 			/* We will fall around to get more data */
6084 		}
6085 		goto get_more_data;
6086 	} else {
6087 		/*-
6088 		 * Give caller back the mbuf chain,
6089 		 * store in uio_resid the length
6090 		 */
6091 		wakeup_read_socket = 0;
6092 		if ((control->end_added == 0) ||
6093 		    (TAILQ_NEXT(control, next) == NULL)) {
6094 			/* Need to get rlock */
6095 			if (hold_rlock == 0) {
6096 				SCTP_INP_READ_LOCK(inp);
6097 				hold_rlock = 1;
6098 			}
6099 		}
6100 		if (control->end_added) {
6101 			out_flags |= MSG_EOR;
6102 			if ((control->do_not_ref_stcb == 0) &&
6103 			    (control->stcb != NULL) &&
6104 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6105 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6106 		}
6107 		if (control->spec_flags & M_NOTIFICATION) {
6108 			out_flags |= MSG_NOTIFICATION;
6109 		}
6110 		uio->uio_resid = control->length;
6111 		*mp = control->data;
6112 		m = control->data;
6113 		while (m) {
6114 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6115 				sctp_sblog(&so->so_rcv,
6116 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6117 			}
6118 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6119 			freed_so_far += SCTP_BUF_LEN(m);
6120 			freed_so_far += MSIZE;
6121 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6122 				sctp_sblog(&so->so_rcv,
6123 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6124 			}
6125 			m = SCTP_BUF_NEXT(m);
6126 		}
6127 		control->data = control->tail_mbuf = NULL;
6128 		control->length = 0;
6129 		if (out_flags & MSG_EOR) {
6130 			/* Done with this control */
6131 			goto done_with_control;
6132 		}
6133 	}
6134 release:
6135 	if (hold_rlock == 1) {
6136 		SCTP_INP_READ_UNLOCK(inp);
6137 		hold_rlock = 0;
6138 	}
6139 	if (hold_sblock == 1) {
6140 		SOCKBUF_UNLOCK(&so->so_rcv);
6141 		hold_sblock = 0;
6142 	}
6143 	sbunlock(&so->so_rcv);
6144 	sockbuf_lock = 0;
6145 
6146 release_unlocked:
6147 	if (hold_sblock) {
6148 		SOCKBUF_UNLOCK(&so->so_rcv);
6149 		hold_sblock = 0;
6150 	}
6151 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6152 		if ((freed_so_far >= rwnd_req) &&
6153 		    (control && (control->do_not_ref_stcb == 0)) &&
6154 		    (no_rcv_needed == 0))
6155 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6156 	}
6157 out:
6158 	if (msg_flags) {
6159 		*msg_flags = out_flags;
6160 	}
6161 	if (((out_flags & MSG_EOR) == 0) &&
6162 	    ((in_flags & MSG_PEEK) == 0) &&
6163 	    (sinfo) &&
6164 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6165 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6166 		struct sctp_extrcvinfo *s_extra;
6167 
6168 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6169 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6170 	}
6171 	if (hold_rlock == 1) {
6172 		SCTP_INP_READ_UNLOCK(inp);
6173 	}
6174 	if (hold_sblock) {
6175 		SOCKBUF_UNLOCK(&so->so_rcv);
6176 	}
6177 	if (sockbuf_lock) {
6178 		sbunlock(&so->so_rcv);
6179 	}
6180 	if (freecnt_applied) {
6181 		/*
6182 		 * The lock on the socket buffer protects us so the free
6183 		 * code will stop. But since we used the socketbuf lock and
6184 		 * the sender uses the tcb_lock to increment, we need to use
6185 		 * the atomic add to the refcnt.
6186 		 */
6187 		if (stcb == NULL) {
6188 #ifdef INVARIANTS
6189 			panic("stcb for refcnt has gone NULL?");
6190 			goto stage_left;
6191 #else
6192 			goto stage_left;
6193 #endif
6194 		}
6195 		atomic_add_int(&stcb->asoc.refcnt, -1);
6196 		/* Save the value back for next time */
6197 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6198 	}
6199 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6200 		if (stcb) {
6201 			sctp_misc_ints(SCTP_SORECV_DONE,
6202 			    freed_so_far,
6203 			    ((uio) ? (slen - uio->uio_resid) : slen),
6204 			    stcb->asoc.my_rwnd,
6205 			    so->so_rcv.sb_cc);
6206 		} else {
6207 			sctp_misc_ints(SCTP_SORECV_DONE,
6208 			    freed_so_far,
6209 			    ((uio) ? (slen - uio->uio_resid) : slen),
6210 			    0,
6211 			    so->so_rcv.sb_cc);
6212 		}
6213 	}
6214 stage_left:
6215 	if (wakeup_read_socket) {
6216 		sctp_sorwakeup(inp, so);
6217 	}
6218 	return (error);
6219 }
6220 
6221 
6222 #ifdef SCTP_MBUF_LOGGING
6223 struct mbuf *
6224 sctp_m_free(struct mbuf *m)
6225 {
6226 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6227 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6228 	}
6229 	return (m_free(m));
6230 }
6231 
6232 void
6233 sctp_m_freem(struct mbuf *mb)
6234 {
6235 	while (mb != NULL)
6236 		mb = sctp_m_free(mb);
6237 }
6238 
6239 #endif
6240 
6241 int
6242 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6243 {
6244 	/*
6245 	 * Given a local address. For all associations that holds the
6246 	 * address, request a peer-set-primary.
6247 	 */
6248 	struct sctp_ifa *ifa;
6249 	struct sctp_laddr *wi;
6250 
6251 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6252 	if (ifa == NULL) {
6253 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6254 		return (EADDRNOTAVAIL);
6255 	}
6256 	/*
6257 	 * Now that we have the ifa we must awaken the iterator with this
6258 	 * message.
6259 	 */
6260 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6261 	if (wi == NULL) {
6262 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6263 		return (ENOMEM);
6264 	}
6265 	/* Now incr the count and int wi structure */
6266 	SCTP_INCR_LADDR_COUNT();
6267 	bzero(wi, sizeof(*wi));
6268 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6269 	wi->ifa = ifa;
6270 	wi->action = SCTP_SET_PRIM_ADDR;
6271 	atomic_add_int(&ifa->refcount, 1);
6272 
6273 	/* Now add it to the work queue */
6274 	SCTP_WQ_ADDR_LOCK();
6275 	/*
6276 	 * Should this really be a tailq? As it is we will process the
6277 	 * newest first :-0
6278 	 */
6279 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6280 	SCTP_WQ_ADDR_UNLOCK();
6281 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6282 	    (struct sctp_inpcb *)NULL,
6283 	    (struct sctp_tcb *)NULL,
6284 	    (struct sctp_nets *)NULL);
6285 	return (0);
6286 }
6287 
6288 
6289 int
6290 sctp_soreceive(struct socket *so,
6291     struct sockaddr **psa,
6292     struct uio *uio,
6293     struct mbuf **mp0,
6294     struct mbuf **controlp,
6295     int *flagsp)
6296 {
6297 	int error, fromlen;
6298 	uint8_t sockbuf[256];
6299 	struct sockaddr *from;
6300 	struct sctp_extrcvinfo sinfo;
6301 	int filling_sinfo = 1;
6302 	struct sctp_inpcb *inp;
6303 
6304 	inp = (struct sctp_inpcb *)so->so_pcb;
6305 	/* pickup the assoc we are reading from */
6306 	if (inp == NULL) {
6307 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6308 		return (EINVAL);
6309 	}
6310 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6311 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6312 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6313 	    (controlp == NULL)) {
6314 		/* user does not want the sndrcv ctl */
6315 		filling_sinfo = 0;
6316 	}
6317 	if (psa) {
6318 		from = (struct sockaddr *)sockbuf;
6319 		fromlen = sizeof(sockbuf);
6320 		from->sa_len = 0;
6321 	} else {
6322 		from = NULL;
6323 		fromlen = 0;
6324 	}
6325 
6326 	if (filling_sinfo) {
6327 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6328 	}
6329 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6330 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6331 	if (controlp != NULL) {
6332 		/* copy back the sinfo in a CMSG format */
6333 		if (filling_sinfo)
6334 			*controlp = sctp_build_ctl_nchunk(inp,
6335 			    (struct sctp_sndrcvinfo *)&sinfo);
6336 		else
6337 			*controlp = NULL;
6338 	}
6339 	if (psa) {
6340 		/* copy back the address info */
6341 		if (from && from->sa_len) {
6342 			*psa = sodupsockaddr(from, M_NOWAIT);
6343 		} else {
6344 			*psa = NULL;
6345 		}
6346 	}
6347 	return (error);
6348 }
6349 
6350 
6351 
6352 
6353 
6354 int
6355 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6356     int totaddr, int *error)
6357 {
6358 	int added = 0;
6359 	int i;
6360 	struct sctp_inpcb *inp;
6361 	struct sockaddr *sa;
6362 	size_t incr = 0;
6363 
6364 #ifdef INET
6365 	struct sockaddr_in *sin;
6366 
6367 #endif
6368 #ifdef INET6
6369 	struct sockaddr_in6 *sin6;
6370 
6371 #endif
6372 
6373 	sa = addr;
6374 	inp = stcb->sctp_ep;
6375 	*error = 0;
6376 	for (i = 0; i < totaddr; i++) {
6377 		switch (sa->sa_family) {
6378 #ifdef INET
6379 		case AF_INET:
6380 			incr = sizeof(struct sockaddr_in);
6381 			sin = (struct sockaddr_in *)sa;
6382 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6383 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6384 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6385 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6386 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6387 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6388 				*error = EINVAL;
6389 				goto out_now;
6390 			}
6391 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6392 				/* assoc gone no un-lock */
6393 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6394 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6395 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6396 				*error = ENOBUFS;
6397 				goto out_now;
6398 			}
6399 			added++;
6400 			break;
6401 #endif
6402 #ifdef INET6
6403 		case AF_INET6:
6404 			incr = sizeof(struct sockaddr_in6);
6405 			sin6 = (struct sockaddr_in6 *)sa;
6406 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6407 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6408 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6409 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6410 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6411 				*error = EINVAL;
6412 				goto out_now;
6413 			}
6414 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6415 				/* assoc gone no un-lock */
6416 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6417 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6418 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6419 				*error = ENOBUFS;
6420 				goto out_now;
6421 			}
6422 			added++;
6423 			break;
6424 #endif
6425 		default:
6426 			break;
6427 		}
6428 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6429 	}
6430 out_now:
6431 	return (added);
6432 }
6433 
6434 struct sctp_tcb *
6435 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6436     int *totaddr, int *num_v4, int *num_v6, int *error,
6437     int limit, int *bad_addr)
6438 {
6439 	struct sockaddr *sa;
6440 	struct sctp_tcb *stcb = NULL;
6441 	size_t incr, at, i;
6442 
6443 	at = incr = 0;
6444 	sa = addr;
6445 
6446 	*error = *num_v6 = *num_v4 = 0;
6447 	/* account and validate addresses */
6448 	for (i = 0; i < (size_t)*totaddr; i++) {
6449 		switch (sa->sa_family) {
6450 #ifdef INET
6451 		case AF_INET:
6452 			(*num_v4) += 1;
6453 			incr = sizeof(struct sockaddr_in);
6454 			if (sa->sa_len != incr) {
6455 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6456 				*error = EINVAL;
6457 				*bad_addr = 1;
6458 				return (NULL);
6459 			}
6460 			break;
6461 #endif
6462 #ifdef INET6
6463 		case AF_INET6:
6464 			{
6465 				struct sockaddr_in6 *sin6;
6466 
6467 				sin6 = (struct sockaddr_in6 *)sa;
6468 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6469 					/* Must be non-mapped for connectx */
6470 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6471 					*error = EINVAL;
6472 					*bad_addr = 1;
6473 					return (NULL);
6474 				}
6475 				(*num_v6) += 1;
6476 				incr = sizeof(struct sockaddr_in6);
6477 				if (sa->sa_len != incr) {
6478 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6479 					*error = EINVAL;
6480 					*bad_addr = 1;
6481 					return (NULL);
6482 				}
6483 				break;
6484 			}
6485 #endif
6486 		default:
6487 			*totaddr = i;
6488 			/* we are done */
6489 			break;
6490 		}
6491 		if (i == (size_t)*totaddr) {
6492 			break;
6493 		}
6494 		SCTP_INP_INCR_REF(inp);
6495 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6496 		if (stcb != NULL) {
6497 			/* Already have or am bring up an association */
6498 			return (stcb);
6499 		} else {
6500 			SCTP_INP_DECR_REF(inp);
6501 		}
6502 		if ((at + incr) > (size_t)limit) {
6503 			*totaddr = i;
6504 			break;
6505 		}
6506 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6507 	}
6508 	return ((struct sctp_tcb *)NULL);
6509 }
6510 
6511 /*
6512  * sctp_bindx(ADD) for one address.
6513  * assumes all arguments are valid/checked by caller.
6514  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/* Address actually bound; may be rewritten to an unmapped v4 copy below. */
	struct sockaddr *addr_touse;

#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif
		/* wrong length for an IPv6 sockaddr */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Unmap the v4-mapped address and bind the plain v4 form. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		/* wrong length for an IPv4 sockaddr */
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* Endpoint not yet bound at all: treat as a plain first bind. */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/*
		 * validate the incoming port.  NOTE(review): this casts to
		 * sockaddr_in even when addr_touse is IPv6 — it appears to
		 * rely on sin_port and sin6_port sharing the same offset;
		 * confirm.
		 */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Address is free: add it to this endpoint. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* Some other endpoint already owns this address/port. */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6642 
6643 /*
6644  * sctp_bindx(DELETE) for one address.
6645  * assumes all arguments are valid/checked by caller.
6646  */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/* Address actually removed; may be rewritten to an unmapped v4 copy below. */
	struct sockaddr *addr_touse;

#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif

		/* wrong length for an IPv6 sockaddr */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Unmap the v4-mapped address and delete the plain v4 form. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		/* wrong length for an IPv4 sockaddr */
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6732 
6733 /*
6734  * returns the valid local address count for an assoc, taking into account
6735  * all scoping rules
6736  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope;

#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;

#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;

#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	/* Hold the address read lock while walking VRF/ifn/ifa lists. */
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				/* loopback addresses are out of scope */
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							/* not visible in this jail */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							/* private addrs out of scope */
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							/* skip unspecified addrs */
							continue;
						}
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							/* not visible in this jail */
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							/* site-local addrs out of scope */
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case: only explicitly bound addresses that
		 * are not restricted for this association count.
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6882 
6883 #if defined(SCTP_LOCAL_TRACE_BUF)
6884 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/*
	 * Append one record to the global circular trace buffer.  A slot is
	 * claimed lock-free: read the shared index, compute its successor,
	 * and retry the compare-and-swap until no other logger raced us.
	 * Each concurrent caller therefore ends up with a distinct slot.
	 */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			/* Buffer wrapped: this writer takes slot 0, next writer slot 1. */
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		/* Map the wrapped index back to the first slot. */
		saveindex = 0;
	}
	/*
	 * Fill in the claimed entry.  The entry itself is written without a
	 * lock; a racing reader may observe a partially updated record.
	 */
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6910 
6911 #endif
/*
 * Receive callback registered on the SCTP-over-UDP tunneling socket(s):
 * strip the encapsulating UDP header from an incoming datagram and hand
 * the inner SCTP packet to the normal SCTP input path.
 *
 * m:   received mbuf chain, IP header at the front; consumed on all paths.
 * off: offset of the UDP header within the packet.
 *
 * The source port of the outer UDP header is passed on (still in network
 * byte order, as stored in the header) so the association can tunnel
 * replies back to the same port.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	struct ip *iph;

#ifdef INET6
	struct ip6_hdr *ip6;

#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port (kept in network byte order) */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain: re-attach the payload to the IP header */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	/*
	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
	 * checksum and it was valid. Since CSUM_DATA_VALID ==
	 * CSUM_SCTP_VALID this would imply that the HW also verified the
	 * SCTP checksum. Therefore, clear the bit.
	 */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
	/* Re-read the IP header: m_pullup/m_split may have moved the data */
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* Shrink the IP total length to account for the removed UDP header */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		/* Likewise shrink the IPv6 payload length */
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6996 
void
sctp_over_udp_stop(void)
{
#if defined(INET) || defined(INET6)
	struct socket *tun_so;
#endif

	/*
	 * Close and detach the SCTP-over-UDP tunneling sockets, if any.
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
#ifdef INET
	tun_so = SCTP_BASE_INFO(udp4_tun_socket);
	if (tun_so != NULL) {
		soclose(tun_so);
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	tun_so = SCTP_BASE_INFO(udp6_tun_socket);
	if (tun_so != NULL) {
		soclose(tun_so);
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
7017 
7018 int
7019 sctp_over_udp_start(void)
7020 {
7021 	uint16_t port;
7022 	int ret;
7023 
7024 #ifdef INET
7025 	struct sockaddr_in sin;
7026 
7027 #endif
7028 #ifdef INET6
7029 	struct sockaddr_in6 sin6;
7030 
7031 #endif
7032 	/*
7033 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7034 	 * for writting!
7035 	 */
7036 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7037 	if (ntohs(port) == 0) {
7038 		/* Must have a port set */
7039 		return (EINVAL);
7040 	}
7041 #ifdef INET
7042 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7043 		/* Already running -- must stop first */
7044 		return (EALREADY);
7045 	}
7046 #endif
7047 #ifdef INET6
7048 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7049 		/* Already running -- must stop first */
7050 		return (EALREADY);
7051 	}
7052 #endif
7053 #ifdef INET
7054 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7055 	    SOCK_DGRAM, IPPROTO_UDP,
7056 	    curthread->td_ucred, curthread))) {
7057 		sctp_over_udp_stop();
7058 		return (ret);
7059 	}
7060 	/* Call the special UDP hook. */
7061 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7062 	    sctp_recv_udp_tunneled_packet, NULL))) {
7063 		sctp_over_udp_stop();
7064 		return (ret);
7065 	}
7066 	/* Ok, we have a socket, bind it to the port. */
7067 	memset(&sin, 0, sizeof(struct sockaddr_in));
7068 	sin.sin_len = sizeof(struct sockaddr_in);
7069 	sin.sin_family = AF_INET;
7070 	sin.sin_port = htons(port);
7071 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7072 	    (struct sockaddr *)&sin, curthread))) {
7073 		sctp_over_udp_stop();
7074 		return (ret);
7075 	}
7076 #endif
7077 #ifdef INET6
7078 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7079 	    SOCK_DGRAM, IPPROTO_UDP,
7080 	    curthread->td_ucred, curthread))) {
7081 		sctp_over_udp_stop();
7082 		return (ret);
7083 	}
7084 	/* Call the special UDP hook. */
7085 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7086 	    sctp_recv_udp_tunneled_packet, NULL))) {
7087 		sctp_over_udp_stop();
7088 		return (ret);
7089 	}
7090 	/* Ok, we have a socket, bind it to the port. */
7091 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7092 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7093 	sin6.sin6_family = AF_INET6;
7094 	sin6.sin6_port = htons(port);
7095 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7096 	    (struct sockaddr *)&sin6, curthread))) {
7097 		sctp_over_udp_stop();
7098 		return (ret);
7099 	}
7100 #endif
7101 	return (0);
7102 }
7103