xref: /freebsd/sys/netinet/sctputil.c (revision 640235e2c2ba32947f7c59d168437ffa1280f1e6)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55 #ifdef INET6
56 #include <netinet/icmp6.h>
57 #endif
58 
59 
60 #ifndef KTR_SCTP
61 #define KTR_SCTP KTR_SUBSYS
62 #endif
63 
64 extern const struct sctp_cc_functions sctp_cc_functions[];
65 extern const struct sctp_ss_functions sctp_ss_functions[];
66 
67 void
68 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
69 {
70 	struct sctp_cwnd_log sctp_clog;
71 
72 	sctp_clog.x.sb.stcb = stcb;
73 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
74 	if (stcb)
75 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
76 	else
77 		sctp_clog.x.sb.stcb_sbcc = 0;
78 	sctp_clog.x.sb.incr = incr;
79 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
80 	    SCTP_LOG_EVENT_SB,
81 	    from,
82 	    sctp_clog.x.misc.log1,
83 	    sctp_clog.x.misc.log2,
84 	    sctp_clog.x.misc.log3,
85 	    sctp_clog.x.misc.log4);
86 }
87 
88 void
89 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
90 {
91 	struct sctp_cwnd_log sctp_clog;
92 
93 	sctp_clog.x.close.inp = (void *)inp;
94 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
95 	if (stcb) {
96 		sctp_clog.x.close.stcb = (void *)stcb;
97 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
98 	} else {
99 		sctp_clog.x.close.stcb = 0;
100 		sctp_clog.x.close.state = 0;
101 	}
102 	sctp_clog.x.close.loc = loc;
103 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
104 	    SCTP_LOG_EVENT_CLOSE,
105 	    0,
106 	    sctp_clog.x.misc.log1,
107 	    sctp_clog.x.misc.log2,
108 	    sctp_clog.x.misc.log3,
109 	    sctp_clog.x.misc.log4);
110 }
111 
112 void
113 rto_logging(struct sctp_nets *net, int from)
114 {
115 	struct sctp_cwnd_log sctp_clog;
116 
117 	memset(&sctp_clog, 0, sizeof(sctp_clog));
118 	sctp_clog.x.rto.net = (void *)net;
119 	sctp_clog.x.rto.rtt = net->rtt / 1000;
120 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
121 	    SCTP_LOG_EVENT_RTT,
122 	    from,
123 	    sctp_clog.x.misc.log1,
124 	    sctp_clog.x.misc.log2,
125 	    sctp_clog.x.misc.log3,
126 	    sctp_clog.x.misc.log4);
127 }
128 
129 void
130 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
131 {
132 	struct sctp_cwnd_log sctp_clog;
133 
134 	sctp_clog.x.strlog.stcb = stcb;
135 	sctp_clog.x.strlog.n_tsn = tsn;
136 	sctp_clog.x.strlog.n_sseq = sseq;
137 	sctp_clog.x.strlog.e_tsn = 0;
138 	sctp_clog.x.strlog.e_sseq = 0;
139 	sctp_clog.x.strlog.strm = stream;
140 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
141 	    SCTP_LOG_EVENT_STRM,
142 	    from,
143 	    sctp_clog.x.misc.log1,
144 	    sctp_clog.x.misc.log2,
145 	    sctp_clog.x.misc.log3,
146 	    sctp_clog.x.misc.log4);
147 }
148 
149 void
150 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
151 {
152 	struct sctp_cwnd_log sctp_clog;
153 
154 	sctp_clog.x.nagle.stcb = (void *)stcb;
155 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
156 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
157 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
158 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
159 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
160 	    SCTP_LOG_EVENT_NAGLE,
161 	    action,
162 	    sctp_clog.x.misc.log1,
163 	    sctp_clog.x.misc.log2,
164 	    sctp_clog.x.misc.log3,
165 	    sctp_clog.x.misc.log4);
166 }
167 
168 void
169 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
170 {
171 	struct sctp_cwnd_log sctp_clog;
172 
173 	sctp_clog.x.sack.cumack = cumack;
174 	sctp_clog.x.sack.oldcumack = old_cumack;
175 	sctp_clog.x.sack.tsn = tsn;
176 	sctp_clog.x.sack.numGaps = gaps;
177 	sctp_clog.x.sack.numDups = dups;
178 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
179 	    SCTP_LOG_EVENT_SACK,
180 	    from,
181 	    sctp_clog.x.misc.log1,
182 	    sctp_clog.x.misc.log2,
183 	    sctp_clog.x.misc.log3,
184 	    sctp_clog.x.misc.log4);
185 }
186 
187 void
188 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
189 {
190 	struct sctp_cwnd_log sctp_clog;
191 
192 	memset(&sctp_clog, 0, sizeof(sctp_clog));
193 	sctp_clog.x.map.base = map;
194 	sctp_clog.x.map.cum = cum;
195 	sctp_clog.x.map.high = high;
196 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
197 	    SCTP_LOG_EVENT_MAP,
198 	    from,
199 	    sctp_clog.x.misc.log1,
200 	    sctp_clog.x.misc.log2,
201 	    sctp_clog.x.misc.log3,
202 	    sctp_clog.x.misc.log4);
203 }
204 
205 void
206 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
207 {
208 	struct sctp_cwnd_log sctp_clog;
209 
210 	memset(&sctp_clog, 0, sizeof(sctp_clog));
211 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
212 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
213 	sctp_clog.x.fr.tsn = tsn;
214 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
215 	    SCTP_LOG_EVENT_FR,
216 	    from,
217 	    sctp_clog.x.misc.log1,
218 	    sctp_clog.x.misc.log2,
219 	    sctp_clog.x.misc.log3,
220 	    sctp_clog.x.misc.log4);
221 }
222 
223 #ifdef SCTP_MBUF_LOGGING
224 void
225 sctp_log_mb(struct mbuf *m, int from)
226 {
227 	struct sctp_cwnd_log sctp_clog;
228 
229 	sctp_clog.x.mb.mp = m;
230 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
231 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
232 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
233 	if (SCTP_BUF_IS_EXTENDED(m)) {
234 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
235 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
236 	} else {
237 		sctp_clog.x.mb.ext = 0;
238 		sctp_clog.x.mb.refcnt = 0;
239 	}
240 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
241 	    SCTP_LOG_EVENT_MBUF,
242 	    from,
243 	    sctp_clog.x.misc.log1,
244 	    sctp_clog.x.misc.log2,
245 	    sctp_clog.x.misc.log3,
246 	    sctp_clog.x.misc.log4);
247 }
248 
249 void
250 sctp_log_mbc(struct mbuf *m, int from)
251 {
252 	struct mbuf *mat;
253 
254 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
255 		sctp_log_mb(mat, from);
256 	}
257 }
258 
259 #endif
260 
261 void
262 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
263 {
264 	struct sctp_cwnd_log sctp_clog;
265 
266 	if (control == NULL) {
267 		SCTP_PRINTF("Gak log of NULL?\n");
268 		return;
269 	}
270 	sctp_clog.x.strlog.stcb = control->stcb;
271 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
272 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
273 	sctp_clog.x.strlog.strm = control->sinfo_stream;
274 	if (poschk != NULL) {
275 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
276 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
277 	} else {
278 		sctp_clog.x.strlog.e_tsn = 0;
279 		sctp_clog.x.strlog.e_sseq = 0;
280 	}
281 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
282 	    SCTP_LOG_EVENT_STRM,
283 	    from,
284 	    sctp_clog.x.misc.log1,
285 	    sctp_clog.x.misc.log2,
286 	    sctp_clog.x.misc.log3,
287 	    sctp_clog.x.misc.log4);
288 }
289 
290 void
291 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
292 {
293 	struct sctp_cwnd_log sctp_clog;
294 
295 	sctp_clog.x.cwnd.net = net;
296 	if (stcb->asoc.send_queue_cnt > 255)
297 		sctp_clog.x.cwnd.cnt_in_send = 255;
298 	else
299 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
300 	if (stcb->asoc.stream_queue_cnt > 255)
301 		sctp_clog.x.cwnd.cnt_in_str = 255;
302 	else
303 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
304 
305 	if (net) {
306 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
307 		sctp_clog.x.cwnd.inflight = net->flight_size;
308 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
309 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
310 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
311 	}
312 	if (SCTP_CWNDLOG_PRESEND == from) {
313 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
314 	}
315 	sctp_clog.x.cwnd.cwnd_augment = augment;
316 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
317 	    SCTP_LOG_EVENT_CWND,
318 	    from,
319 	    sctp_clog.x.misc.log1,
320 	    sctp_clog.x.misc.log2,
321 	    sctp_clog.x.misc.log3,
322 	    sctp_clog.x.misc.log4);
323 }
324 
325 void
326 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
327 {
328 	struct sctp_cwnd_log sctp_clog;
329 
330 	memset(&sctp_clog, 0, sizeof(sctp_clog));
331 	if (inp) {
332 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
333 
334 	} else {
335 		sctp_clog.x.lock.sock = (void *)NULL;
336 	}
337 	sctp_clog.x.lock.inp = (void *)inp;
338 	if (stcb) {
339 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
340 	} else {
341 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
342 	}
343 	if (inp) {
344 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
345 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
346 	} else {
347 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
348 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
349 	}
350 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
351 	if (inp && (inp->sctp_socket)) {
352 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
353 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
354 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
355 	} else {
356 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
357 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
358 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
359 	}
360 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
361 	    SCTP_LOG_LOCK_EVENT,
362 	    from,
363 	    sctp_clog.x.misc.log1,
364 	    sctp_clog.x.misc.log2,
365 	    sctp_clog.x.misc.log3,
366 	    sctp_clog.x.misc.log4);
367 }
368 
369 void
370 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
371 {
372 	struct sctp_cwnd_log sctp_clog;
373 
374 	memset(&sctp_clog, 0, sizeof(sctp_clog));
375 	sctp_clog.x.cwnd.net = net;
376 	sctp_clog.x.cwnd.cwnd_new_value = error;
377 	sctp_clog.x.cwnd.inflight = net->flight_size;
378 	sctp_clog.x.cwnd.cwnd_augment = burst;
379 	if (stcb->asoc.send_queue_cnt > 255)
380 		sctp_clog.x.cwnd.cnt_in_send = 255;
381 	else
382 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
383 	if (stcb->asoc.stream_queue_cnt > 255)
384 		sctp_clog.x.cwnd.cnt_in_str = 255;
385 	else
386 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
387 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
388 	    SCTP_LOG_EVENT_MAXBURST,
389 	    from,
390 	    sctp_clog.x.misc.log1,
391 	    sctp_clog.x.misc.log2,
392 	    sctp_clog.x.misc.log3,
393 	    sctp_clog.x.misc.log4);
394 }
395 
396 void
397 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
398 {
399 	struct sctp_cwnd_log sctp_clog;
400 
401 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
402 	sctp_clog.x.rwnd.send_size = snd_size;
403 	sctp_clog.x.rwnd.overhead = overhead;
404 	sctp_clog.x.rwnd.new_rwnd = 0;
405 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
406 	    SCTP_LOG_EVENT_RWND,
407 	    from,
408 	    sctp_clog.x.misc.log1,
409 	    sctp_clog.x.misc.log2,
410 	    sctp_clog.x.misc.log3,
411 	    sctp_clog.x.misc.log4);
412 }
413 
414 void
415 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
416 {
417 	struct sctp_cwnd_log sctp_clog;
418 
419 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
420 	sctp_clog.x.rwnd.send_size = flight_size;
421 	sctp_clog.x.rwnd.overhead = overhead;
422 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
423 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
424 	    SCTP_LOG_EVENT_RWND,
425 	    from,
426 	    sctp_clog.x.misc.log1,
427 	    sctp_clog.x.misc.log2,
428 	    sctp_clog.x.misc.log3,
429 	    sctp_clog.x.misc.log4);
430 }
431 
432 #ifdef SCTP_MBCNT_LOGGING
433 static void
434 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
435 {
436 	struct sctp_cwnd_log sctp_clog;
437 
438 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
439 	sctp_clog.x.mbcnt.size_change = book;
440 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
441 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
442 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
443 	    SCTP_LOG_EVENT_MBCNT,
444 	    from,
445 	    sctp_clog.x.misc.log1,
446 	    sctp_clog.x.misc.log2,
447 	    sctp_clog.x.misc.log3,
448 	    sctp_clog.x.misc.log4);
449 }
450 
451 #endif
452 
453 void
454 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
455 {
456 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
457 	    SCTP_LOG_MISC_EVENT,
458 	    from,
459 	    a, b, c, d);
460 }
461 
462 void
463 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
464 {
465 	struct sctp_cwnd_log sctp_clog;
466 
467 	sctp_clog.x.wake.stcb = (void *)stcb;
468 	sctp_clog.x.wake.wake_cnt = wake_cnt;
469 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
470 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
471 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
472 
473 	if (stcb->asoc.stream_queue_cnt < 0xff)
474 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
475 	else
476 		sctp_clog.x.wake.stream_qcnt = 0xff;
477 
478 	if (stcb->asoc.chunks_on_out_queue < 0xff)
479 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
480 	else
481 		sctp_clog.x.wake.chunks_on_oque = 0xff;
482 
483 	sctp_clog.x.wake.sctpflags = 0;
484 	/* set in the defered mode stuff */
485 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
486 		sctp_clog.x.wake.sctpflags |= 1;
487 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
488 		sctp_clog.x.wake.sctpflags |= 2;
489 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
490 		sctp_clog.x.wake.sctpflags |= 4;
491 	/* what about the sb */
492 	if (stcb->sctp_socket) {
493 		struct socket *so = stcb->sctp_socket;
494 
495 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
496 	} else {
497 		sctp_clog.x.wake.sbflags = 0xff;
498 	}
499 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
500 	    SCTP_LOG_EVENT_WAKE,
501 	    from,
502 	    sctp_clog.x.misc.log1,
503 	    sctp_clog.x.misc.log2,
504 	    sctp_clog.x.misc.log3,
505 	    sctp_clog.x.misc.log4);
506 }
507 
508 void
509 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
510 {
511 	struct sctp_cwnd_log sctp_clog;
512 
513 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
514 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
515 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
516 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
517 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
518 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
519 	sctp_clog.x.blk.sndlen = (uint32_t) sendlen;
520 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
521 	    SCTP_LOG_EVENT_BLOCK,
522 	    from,
523 	    sctp_clog.x.misc.log1,
524 	    sctp_clog.x.misc.log2,
525 	    sctp_clog.x.misc.log3,
526 	    sctp_clog.x.misc.log4);
527 }
528 
529 int
530 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
531 {
532 	/* May need to fix this if ktrdump does not work */
533 	return (0);
534 }
535 
536 #ifdef SCTP_AUDITING_ENABLED
537 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
538 static int sctp_audit_indx = 0;
539 
540 static
541 void
542 sctp_print_audit_report(void)
543 {
544 	int i;
545 	int cnt;
546 
547 	cnt = 0;
548 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
549 		if ((sctp_audit_data[i][0] == 0xe0) &&
550 		    (sctp_audit_data[i][1] == 0x01)) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if (sctp_audit_data[i][0] == 0xf0) {
554 			cnt = 0;
555 			SCTP_PRINTF("\n");
556 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
557 		    (sctp_audit_data[i][1] == 0x01)) {
558 			SCTP_PRINTF("\n");
559 			cnt = 0;
560 		}
561 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
562 		    (uint32_t) sctp_audit_data[i][1]);
563 		cnt++;
564 		if ((cnt % 14) == 0)
565 			SCTP_PRINTF("\n");
566 	}
567 	for (i = 0; i < sctp_audit_indx; i++) {
568 		if ((sctp_audit_data[i][0] == 0xe0) &&
569 		    (sctp_audit_data[i][1] == 0x01)) {
570 			cnt = 0;
571 			SCTP_PRINTF("\n");
572 		} else if (sctp_audit_data[i][0] == 0xf0) {
573 			cnt = 0;
574 			SCTP_PRINTF("\n");
575 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
576 		    (sctp_audit_data[i][1] == 0x01)) {
577 			SCTP_PRINTF("\n");
578 			cnt = 0;
579 		}
580 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
581 		    (uint32_t) sctp_audit_data[i][1]);
582 		cnt++;
583 		if ((cnt % 14) == 0)
584 			SCTP_PRINTF("\n");
585 	}
586 	SCTP_PRINTF("\n");
587 }
588 
589 void
590 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
591     struct sctp_nets *net)
592 {
593 	int resend_cnt, tot_out, rep, tot_book_cnt;
594 	struct sctp_nets *lnet;
595 	struct sctp_tmit_chunk *chk;
596 
597 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
598 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
599 	sctp_audit_indx++;
600 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
601 		sctp_audit_indx = 0;
602 	}
603 	if (inp == NULL) {
604 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
605 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
606 		sctp_audit_indx++;
607 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
608 			sctp_audit_indx = 0;
609 		}
610 		return;
611 	}
612 	if (stcb == NULL) {
613 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
614 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
615 		sctp_audit_indx++;
616 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
617 			sctp_audit_indx = 0;
618 		}
619 		return;
620 	}
621 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
622 	sctp_audit_data[sctp_audit_indx][1] =
623 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
624 	sctp_audit_indx++;
625 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
626 		sctp_audit_indx = 0;
627 	}
628 	rep = 0;
629 	tot_book_cnt = 0;
630 	resend_cnt = tot_out = 0;
631 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
632 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
633 			resend_cnt++;
634 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
635 			tot_out += chk->book_size;
636 			tot_book_cnt++;
637 		}
638 	}
639 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
640 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
641 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
642 		sctp_audit_indx++;
643 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
644 			sctp_audit_indx = 0;
645 		}
646 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
647 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
648 		rep = 1;
649 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
650 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
651 		sctp_audit_data[sctp_audit_indx][1] =
652 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
653 		sctp_audit_indx++;
654 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
655 			sctp_audit_indx = 0;
656 		}
657 	}
658 	if (tot_out != stcb->asoc.total_flight) {
659 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
660 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
661 		sctp_audit_indx++;
662 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
663 			sctp_audit_indx = 0;
664 		}
665 		rep = 1;
666 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
667 		    (int)stcb->asoc.total_flight);
668 		stcb->asoc.total_flight = tot_out;
669 	}
670 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
671 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
672 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
673 		sctp_audit_indx++;
674 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
675 			sctp_audit_indx = 0;
676 		}
677 		rep = 1;
678 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
679 
680 		stcb->asoc.total_flight_count = tot_book_cnt;
681 	}
682 	tot_out = 0;
683 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
684 		tot_out += lnet->flight_size;
685 	}
686 	if (tot_out != stcb->asoc.total_flight) {
687 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
688 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
689 		sctp_audit_indx++;
690 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
691 			sctp_audit_indx = 0;
692 		}
693 		rep = 1;
694 		SCTP_PRINTF("real flight:%d net total was %d\n",
695 		    stcb->asoc.total_flight, tot_out);
696 		/* now corrective action */
697 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
698 
699 			tot_out = 0;
700 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
701 				if ((chk->whoTo == lnet) &&
702 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
703 					tot_out += chk->book_size;
704 				}
705 			}
706 			if (lnet->flight_size != tot_out) {
707 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
708 				    (void *)lnet, lnet->flight_size,
709 				    tot_out);
710 				lnet->flight_size = tot_out;
711 			}
712 		}
713 	}
714 	if (rep) {
715 		sctp_print_audit_report();
716 	}
717 }
718 
719 void
720 sctp_audit_log(uint8_t ev, uint8_t fd)
721 {
722 
723 	sctp_audit_data[sctp_audit_indx][0] = ev;
724 	sctp_audit_data[sctp_audit_indx][1] = fd;
725 	sctp_audit_indx++;
726 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
727 		sctp_audit_indx = 0;
728 	}
729 }
730 
731 #endif
732 
733 /*
734  * sctp_stop_timers_for_shutdown() should be called
735  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
736  * state to make sure that all timers are stopped.
737  */
738 void
739 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
740 {
741 	struct sctp_association *asoc;
742 	struct sctp_nets *net;
743 
744 	asoc = &stcb->asoc;
745 
746 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
747 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
748 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
749 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
750 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
751 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
752 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
753 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
754 	}
755 }
756 
757 /*
758  * a list of sizes based on typical mtu's, used only if next hop size not
759  * returned.
760  */
761 static uint32_t sctp_mtu_sizes[] = {
762 	68,
763 	296,
764 	508,
765 	512,
766 	544,
767 	576,
768 	1006,
769 	1492,
770 	1500,
771 	1536,
772 	2002,
773 	2048,
774 	4352,
775 	4464,
776 	8166,
777 	17914,
778 	32000,
779 	65535
780 };
781 
782 /*
783  * Return the largest MTU smaller than val. If there is no
784  * entry, just return val.
785  */
786 uint32_t
787 sctp_get_prev_mtu(uint32_t val)
788 {
789 	uint32_t i;
790 
791 	if (val <= sctp_mtu_sizes[0]) {
792 		return (val);
793 	}
794 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
795 		if (val <= sctp_mtu_sizes[i]) {
796 			break;
797 		}
798 	}
799 	return (sctp_mtu_sizes[i - 1]);
800 }
801 
802 /*
803  * Return the smallest MTU larger than val. If there is no
804  * entry, just return val.
805  */
806 uint32_t
807 sctp_get_next_mtu(uint32_t val)
808 {
809 	/* select another MTU that is just bigger than this one */
810 	uint32_t i;
811 
812 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
813 		if (val < sctp_mtu_sizes[i]) {
814 			return (sctp_mtu_sizes[i]);
815 		}
816 	}
817 	return (val);
818 }
819 
820 void
821 sctp_fill_random_store(struct sctp_pcb *m)
822 {
823 	/*
824 	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
825 	 * our counter. The result becomes our good random numbers and we
826 	 * then setup to give these out. Note that we do no locking to
827 	 * protect this. This is ok, since if competing folks call this we
828 	 * will get more gobbled gook in the random store which is what we
829 	 * want. There is a danger that two guys will use the same random
830 	 * numbers, but thats ok too since that is random as well :->
831 	 */
832 	m->store_at = 0;
833 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
834 	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
835 	    sizeof(m->random_counter), (uint8_t *) m->random_store);
836 	m->random_counter++;
837 }
838 
839 uint32_t
840 sctp_select_initial_TSN(struct sctp_pcb *inp)
841 {
842 	/*
843 	 * A true implementation should use random selection process to get
844 	 * the initial stream sequence number, using RFC1750 as a good
845 	 * guideline
846 	 */
847 	uint32_t x, *xp;
848 	uint8_t *p;
849 	int store_at, new_store;
850 
851 	if (inp->initial_sequence_debug != 0) {
852 		uint32_t ret;
853 
854 		ret = inp->initial_sequence_debug;
855 		inp->initial_sequence_debug++;
856 		return (ret);
857 	}
858 retry:
859 	store_at = inp->store_at;
860 	new_store = store_at + sizeof(uint32_t);
861 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
862 		new_store = 0;
863 	}
864 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
865 		goto retry;
866 	}
867 	if (new_store == 0) {
868 		/* Refill the random store */
869 		sctp_fill_random_store(inp);
870 	}
871 	p = &inp->random_store[store_at];
872 	xp = (uint32_t *) p;
873 	x = *xp;
874 	return (x);
875 }
876 
877 uint32_t
878 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
879 {
880 	uint32_t x;
881 	struct timeval now;
882 
883 	if (check) {
884 		(void)SCTP_GETTIME_TIMEVAL(&now);
885 	}
886 	for (;;) {
887 		x = sctp_select_initial_TSN(&inp->sctp_ep);
888 		if (x == 0) {
889 			/* we never use 0 */
890 			continue;
891 		}
892 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
893 			break;
894 		}
895 	}
896 	return (x);
897 }
898 
899 int32_t
900 sctp_map_assoc_state(int kernel_state)
901 {
902 	int32_t user_state;
903 
904 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
905 		user_state = SCTP_CLOSED;
906 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
907 		user_state = SCTP_SHUTDOWN_PENDING;
908 	} else {
909 		switch (kernel_state & SCTP_STATE_MASK) {
910 		case SCTP_STATE_EMPTY:
911 			user_state = SCTP_CLOSED;
912 			break;
913 		case SCTP_STATE_INUSE:
914 			user_state = SCTP_CLOSED;
915 			break;
916 		case SCTP_STATE_COOKIE_WAIT:
917 			user_state = SCTP_COOKIE_WAIT;
918 			break;
919 		case SCTP_STATE_COOKIE_ECHOED:
920 			user_state = SCTP_COOKIE_ECHOED;
921 			break;
922 		case SCTP_STATE_OPEN:
923 			user_state = SCTP_ESTABLISHED;
924 			break;
925 		case SCTP_STATE_SHUTDOWN_SENT:
926 			user_state = SCTP_SHUTDOWN_SENT;
927 			break;
928 		case SCTP_STATE_SHUTDOWN_RECEIVED:
929 			user_state = SCTP_SHUTDOWN_RECEIVED;
930 			break;
931 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
932 			user_state = SCTP_SHUTDOWN_ACK_SENT;
933 			break;
934 		default:
935 			user_state = SCTP_CLOSED;
936 			break;
937 		}
938 	}
939 	return (user_state);
940 }
941 
942 int
943 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
944     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
945 {
946 	struct sctp_association *asoc;
947 
948 	/*
949 	 * Anything set to zero is taken care of by the allocation routine's
950 	 * bzero
951 	 */
952 
953 	/*
954 	 * Up front select what scoping to apply on addresses I tell my peer
955 	 * Not sure what to do with these right now, we will need to come up
956 	 * with a way to set them. We may need to pass them through from the
957 	 * caller in the sctp_aloc_assoc() function.
958 	 */
959 	int i;
960 
961 #if defined(SCTP_DETAILED_STR_STATS)
962 	int j;
963 
964 #endif
965 
966 	asoc = &stcb->asoc;
967 	/* init all variables to a known value. */
968 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
969 	asoc->max_burst = inp->sctp_ep.max_burst;
970 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
971 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
972 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
973 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
974 	asoc->ecn_supported = inp->ecn_supported;
975 	asoc->prsctp_supported = inp->prsctp_supported;
976 	asoc->idata_supported = inp->idata_supported;
977 	asoc->auth_supported = inp->auth_supported;
978 	asoc->asconf_supported = inp->asconf_supported;
979 	asoc->reconfig_supported = inp->reconfig_supported;
980 	asoc->nrsack_supported = inp->nrsack_supported;
981 	asoc->pktdrop_supported = inp->pktdrop_supported;
982 	asoc->idata_supported = inp->idata_supported;
983 	asoc->sctp_cmt_pf = (uint8_t) 0;
984 	asoc->sctp_frag_point = inp->sctp_frag_point;
985 	asoc->sctp_features = inp->sctp_features;
986 	asoc->default_dscp = inp->sctp_ep.default_dscp;
987 	asoc->max_cwnd = inp->max_cwnd;
988 #ifdef INET6
989 	if (inp->sctp_ep.default_flowlabel) {
990 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
991 	} else {
992 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
993 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
994 			asoc->default_flowlabel &= 0x000fffff;
995 			asoc->default_flowlabel |= 0x80000000;
996 		} else {
997 			asoc->default_flowlabel = 0;
998 		}
999 	}
1000 #endif
1001 	asoc->sb_send_resv = 0;
1002 	if (override_tag) {
1003 		asoc->my_vtag = override_tag;
1004 	} else {
1005 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1006 	}
1007 	/* Get the nonce tags */
1008 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1009 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1010 	asoc->vrf_id = vrf_id;
1011 
1012 #ifdef SCTP_ASOCLOG_OF_TSNS
1013 	asoc->tsn_in_at = 0;
1014 	asoc->tsn_out_at = 0;
1015 	asoc->tsn_in_wrapped = 0;
1016 	asoc->tsn_out_wrapped = 0;
1017 	asoc->cumack_log_at = 0;
1018 	asoc->cumack_log_atsnt = 0;
1019 #endif
1020 #ifdef SCTP_FS_SPEC_LOG
1021 	asoc->fs_index = 0;
1022 #endif
1023 	asoc->refcnt = 0;
1024 	asoc->assoc_up_sent = 0;
1025 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1026 	    sctp_select_initial_TSN(&inp->sctp_ep);
1027 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1028 	/* we are optimisitic here */
1029 	asoc->peer_supports_nat = 0;
1030 	asoc->sent_queue_retran_cnt = 0;
1031 
1032 	/* for CMT */
1033 	asoc->last_net_cmt_send_started = NULL;
1034 
1035 	/* This will need to be adjusted */
1036 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1037 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1038 	asoc->asconf_seq_in = asoc->last_acked_seq;
1039 
1040 	/* here we are different, we hold the next one we expect */
1041 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1042 
1043 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1044 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1045 
1046 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1047 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1048 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1049 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1050 	asoc->free_chunk_cnt = 0;
1051 
1052 	asoc->iam_blocking = 0;
1053 	asoc->context = inp->sctp_context;
1054 	asoc->local_strreset_support = inp->local_strreset_support;
1055 	asoc->def_send = inp->def_send;
1056 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1057 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1058 	asoc->pr_sctp_cnt = 0;
1059 	asoc->total_output_queue_size = 0;
1060 
1061 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1062 		asoc->scope.ipv6_addr_legal = 1;
1063 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1064 			asoc->scope.ipv4_addr_legal = 1;
1065 		} else {
1066 			asoc->scope.ipv4_addr_legal = 0;
1067 		}
1068 	} else {
1069 		asoc->scope.ipv6_addr_legal = 0;
1070 		asoc->scope.ipv4_addr_legal = 1;
1071 	}
1072 
1073 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1074 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1075 
1076 	asoc->smallest_mtu = inp->sctp_frag_point;
1077 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1078 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1079 
1080 	asoc->stream_locked_on = 0;
1081 	asoc->ecn_echo_cnt_onq = 0;
1082 	asoc->stream_locked = 0;
1083 
1084 	asoc->send_sack = 1;
1085 
1086 	LIST_INIT(&asoc->sctp_restricted_addrs);
1087 
1088 	TAILQ_INIT(&asoc->nets);
1089 	TAILQ_INIT(&asoc->pending_reply_queue);
1090 	TAILQ_INIT(&asoc->asconf_ack_sent);
1091 	/* Setup to fill the hb random cache at first HB */
1092 	asoc->hb_random_idx = 4;
1093 
1094 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1095 
1096 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1097 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1098 
1099 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1100 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1101 
1102 	/*
1103 	 * Now the stream parameters, here we allocate space for all streams
1104 	 * that we request by default.
1105 	 */
1106 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1107 	    o_strms;
1108 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1109 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1110 	    SCTP_M_STRMO);
1111 	if (asoc->strmout == NULL) {
1112 		/* big trouble no memory */
1113 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1114 		return (ENOMEM);
1115 	}
1116 	for (i = 0; i < asoc->streamoutcnt; i++) {
1117 		/*
1118 		 * inbound side must be set to 0xffff, also NOTE when we get
1119 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1120 		 * count (streamoutcnt) but first check if we sent to any of
1121 		 * the upper streams that were dropped (if some were). Those
1122 		 * that were dropped must be notified to the upper layer as
1123 		 * failed to send.
1124 		 */
1125 		asoc->strmout[i].next_mid_ordered = 0;
1126 		asoc->strmout[i].next_mid_unordered = 0;
1127 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1128 		asoc->strmout[i].chunks_on_queues = 0;
1129 #if defined(SCTP_DETAILED_STR_STATS)
1130 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1131 			asoc->strmout[i].abandoned_sent[j] = 0;
1132 			asoc->strmout[i].abandoned_unsent[j] = 0;
1133 		}
1134 #else
1135 		asoc->strmout[i].abandoned_sent[0] = 0;
1136 		asoc->strmout[i].abandoned_unsent[0] = 0;
1137 #endif
1138 		asoc->strmout[i].stream_no = i;
1139 		asoc->strmout[i].last_msg_incomplete = 0;
1140 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1141 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1142 	}
1143 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1144 
1145 	/* Now the mapping array */
1146 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1147 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1148 	    SCTP_M_MAP);
1149 	if (asoc->mapping_array == NULL) {
1150 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1151 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1152 		return (ENOMEM);
1153 	}
1154 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1155 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1156 	    SCTP_M_MAP);
1157 	if (asoc->nr_mapping_array == NULL) {
1158 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1159 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1160 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1161 		return (ENOMEM);
1162 	}
1163 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1164 
1165 	/* Now the init of the other outqueues */
1166 	TAILQ_INIT(&asoc->free_chunks);
1167 	TAILQ_INIT(&asoc->control_send_queue);
1168 	TAILQ_INIT(&asoc->asconf_send_queue);
1169 	TAILQ_INIT(&asoc->send_queue);
1170 	TAILQ_INIT(&asoc->sent_queue);
1171 	TAILQ_INIT(&asoc->resetHead);
1172 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1173 	TAILQ_INIT(&asoc->asconf_queue);
1174 	/* authentication fields */
1175 	asoc->authinfo.random = NULL;
1176 	asoc->authinfo.active_keyid = 0;
1177 	asoc->authinfo.assoc_key = NULL;
1178 	asoc->authinfo.assoc_keyid = 0;
1179 	asoc->authinfo.recv_key = NULL;
1180 	asoc->authinfo.recv_keyid = 0;
1181 	LIST_INIT(&asoc->shared_keys);
1182 	asoc->marked_retrans = 0;
1183 	asoc->port = inp->sctp_ep.port;
1184 	asoc->timoinit = 0;
1185 	asoc->timodata = 0;
1186 	asoc->timosack = 0;
1187 	asoc->timoshutdown = 0;
1188 	asoc->timoheartbeat = 0;
1189 	asoc->timocookie = 0;
1190 	asoc->timoshutdownack = 0;
1191 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1192 	asoc->discontinuity_time = asoc->start_time;
1193 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1194 		asoc->abandoned_unsent[i] = 0;
1195 		asoc->abandoned_sent[i] = 0;
1196 	}
1197 	/*
1198 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1199 	 * freed later when the association is freed.
1200 	 */
1201 	return (0);
1202 }
1203 
1204 void
1205 sctp_print_mapping_array(struct sctp_association *asoc)
1206 {
1207 	unsigned int i, limit;
1208 
1209 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1210 	    asoc->mapping_array_size,
1211 	    asoc->mapping_array_base_tsn,
1212 	    asoc->cumulative_tsn,
1213 	    asoc->highest_tsn_inside_map,
1214 	    asoc->highest_tsn_inside_nr_map);
1215 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1216 		if (asoc->mapping_array[limit - 1] != 0) {
1217 			break;
1218 		}
1219 	}
1220 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1221 	for (i = 0; i < limit; i++) {
1222 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1223 	}
1224 	if (limit % 16)
1225 		SCTP_PRINTF("\n");
1226 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1227 		if (asoc->nr_mapping_array[limit - 1]) {
1228 			break;
1229 		}
1230 	}
1231 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1232 	for (i = 0; i < limit; i++) {
1233 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1234 	}
1235 	if (limit % 16)
1236 		SCTP_PRINTF("\n");
1237 }
1238 
1239 int
1240 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1241 {
1242 	/* mapping array needs to grow */
1243 	uint8_t *new_array1, *new_array2;
1244 	uint32_t new_size;
1245 
1246 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1247 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1248 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1249 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1250 		/* can't get more, forget it */
1251 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1252 		if (new_array1) {
1253 			SCTP_FREE(new_array1, SCTP_M_MAP);
1254 		}
1255 		if (new_array2) {
1256 			SCTP_FREE(new_array2, SCTP_M_MAP);
1257 		}
1258 		return (-1);
1259 	}
1260 	memset(new_array1, 0, new_size);
1261 	memset(new_array2, 0, new_size);
1262 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1263 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1264 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1265 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1266 	asoc->mapping_array = new_array1;
1267 	asoc->nr_mapping_array = new_array2;
1268 	asoc->mapping_array_size = new_size;
1269 	return (0);
1270 }
1271 
1272 
1273 static void
1274 sctp_iterator_work(struct sctp_iterator *it)
1275 {
1276 	int iteration_count = 0;
1277 	int inp_skip = 0;
1278 	int first_in = 1;
1279 	struct sctp_inpcb *tinp;
1280 
1281 	SCTP_INP_INFO_RLOCK();
1282 	SCTP_ITERATOR_LOCK();
1283 	if (it->inp) {
1284 		SCTP_INP_RLOCK(it->inp);
1285 		SCTP_INP_DECR_REF(it->inp);
1286 	}
1287 	if (it->inp == NULL) {
1288 		/* iterator is complete */
1289 done_with_iterator:
1290 		SCTP_ITERATOR_UNLOCK();
1291 		SCTP_INP_INFO_RUNLOCK();
1292 		if (it->function_atend != NULL) {
1293 			(*it->function_atend) (it->pointer, it->val);
1294 		}
1295 		SCTP_FREE(it, SCTP_M_ITER);
1296 		return;
1297 	}
1298 select_a_new_ep:
1299 	if (first_in) {
1300 		first_in = 0;
1301 	} else {
1302 		SCTP_INP_RLOCK(it->inp);
1303 	}
1304 	while (((it->pcb_flags) &&
1305 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1306 	    ((it->pcb_features) &&
1307 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1308 		/* endpoint flags or features don't match, so keep looking */
1309 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1310 			SCTP_INP_RUNLOCK(it->inp);
1311 			goto done_with_iterator;
1312 		}
1313 		tinp = it->inp;
1314 		it->inp = LIST_NEXT(it->inp, sctp_list);
1315 		SCTP_INP_RUNLOCK(tinp);
1316 		if (it->inp == NULL) {
1317 			goto done_with_iterator;
1318 		}
1319 		SCTP_INP_RLOCK(it->inp);
1320 	}
1321 	/* now go through each assoc which is in the desired state */
1322 	if (it->done_current_ep == 0) {
1323 		if (it->function_inp != NULL)
1324 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1325 		it->done_current_ep = 1;
1326 	}
1327 	if (it->stcb == NULL) {
1328 		/* run the per instance function */
1329 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1330 	}
1331 	if ((inp_skip) || it->stcb == NULL) {
1332 		if (it->function_inp_end != NULL) {
1333 			inp_skip = (*it->function_inp_end) (it->inp,
1334 			    it->pointer,
1335 			    it->val);
1336 		}
1337 		SCTP_INP_RUNLOCK(it->inp);
1338 		goto no_stcb;
1339 	}
1340 	while (it->stcb) {
1341 		SCTP_TCB_LOCK(it->stcb);
1342 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1343 			/* not in the right state... keep looking */
1344 			SCTP_TCB_UNLOCK(it->stcb);
1345 			goto next_assoc;
1346 		}
1347 		/* see if we have limited out the iterator loop */
1348 		iteration_count++;
1349 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1350 			/* Pause to let others grab the lock */
1351 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1352 			SCTP_TCB_UNLOCK(it->stcb);
1353 			SCTP_INP_INCR_REF(it->inp);
1354 			SCTP_INP_RUNLOCK(it->inp);
1355 			SCTP_ITERATOR_UNLOCK();
1356 			SCTP_INP_INFO_RUNLOCK();
1357 			SCTP_INP_INFO_RLOCK();
1358 			SCTP_ITERATOR_LOCK();
1359 			if (sctp_it_ctl.iterator_flags) {
1360 				/* We won't be staying here */
1361 				SCTP_INP_DECR_REF(it->inp);
1362 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1363 				if (sctp_it_ctl.iterator_flags &
1364 				    SCTP_ITERATOR_STOP_CUR_IT) {
1365 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1366 					goto done_with_iterator;
1367 				}
1368 				if (sctp_it_ctl.iterator_flags &
1369 				    SCTP_ITERATOR_STOP_CUR_INP) {
1370 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1371 					goto no_stcb;
1372 				}
1373 				/* If we reach here huh? */
1374 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1375 				    sctp_it_ctl.iterator_flags);
1376 				sctp_it_ctl.iterator_flags = 0;
1377 			}
1378 			SCTP_INP_RLOCK(it->inp);
1379 			SCTP_INP_DECR_REF(it->inp);
1380 			SCTP_TCB_LOCK(it->stcb);
1381 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1382 			iteration_count = 0;
1383 		}
1384 		/* run function on this one */
1385 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1386 
1387 		/*
1388 		 * we lie here, it really needs to have its own type but
1389 		 * first I must verify that this won't effect things :-0
1390 		 */
1391 		if (it->no_chunk_output == 0)
1392 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1393 
1394 		SCTP_TCB_UNLOCK(it->stcb);
1395 next_assoc:
1396 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1397 		if (it->stcb == NULL) {
1398 			/* Run last function */
1399 			if (it->function_inp_end != NULL) {
1400 				inp_skip = (*it->function_inp_end) (it->inp,
1401 				    it->pointer,
1402 				    it->val);
1403 			}
1404 		}
1405 	}
1406 	SCTP_INP_RUNLOCK(it->inp);
1407 no_stcb:
1408 	/* done with all assocs on this endpoint, move on to next endpoint */
1409 	it->done_current_ep = 0;
1410 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1411 		it->inp = NULL;
1412 	} else {
1413 		it->inp = LIST_NEXT(it->inp, sctp_list);
1414 	}
1415 	if (it->inp == NULL) {
1416 		goto done_with_iterator;
1417 	}
1418 	goto select_a_new_ep;
1419 }
1420 
1421 void
1422 sctp_iterator_worker(void)
1423 {
1424 	struct sctp_iterator *it, *nit;
1425 
1426 	/* This function is called with the WQ lock in place */
1427 
1428 	sctp_it_ctl.iterator_running = 1;
1429 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1430 		sctp_it_ctl.cur_it = it;
1431 		/* now lets work on this one */
1432 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1433 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1434 		CURVNET_SET(it->vn);
1435 		sctp_iterator_work(it);
1436 		sctp_it_ctl.cur_it = NULL;
1437 		CURVNET_RESTORE();
1438 		SCTP_IPI_ITERATOR_WQ_LOCK();
1439 		/* sa_ignore FREED_MEMORY */
1440 	}
1441 	sctp_it_ctl.iterator_running = 0;
1442 	return;
1443 }
1444 
1445 
1446 static void
1447 sctp_handle_addr_wq(void)
1448 {
1449 	/* deal with the ADDR wq from the rtsock calls */
1450 	struct sctp_laddr *wi, *nwi;
1451 	struct sctp_asconf_iterator *asc;
1452 
1453 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1454 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1455 	if (asc == NULL) {
1456 		/* Try later, no memory */
1457 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1458 		    (struct sctp_inpcb *)NULL,
1459 		    (struct sctp_tcb *)NULL,
1460 		    (struct sctp_nets *)NULL);
1461 		return;
1462 	}
1463 	LIST_INIT(&asc->list_of_work);
1464 	asc->cnt = 0;
1465 
1466 	SCTP_WQ_ADDR_LOCK();
1467 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1468 		LIST_REMOVE(wi, sctp_nxt_addr);
1469 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1470 		asc->cnt++;
1471 	}
1472 	SCTP_WQ_ADDR_UNLOCK();
1473 
1474 	if (asc->cnt == 0) {
1475 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1476 	} else {
1477 		int ret;
1478 
1479 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1480 		    sctp_asconf_iterator_stcb,
1481 		    NULL,	/* No ep end for boundall */
1482 		    SCTP_PCB_FLAGS_BOUNDALL,
1483 		    SCTP_PCB_ANY_FEATURES,
1484 		    SCTP_ASOC_ANY_STATE,
1485 		    (void *)asc, 0,
1486 		    sctp_asconf_iterator_end, NULL, 0);
1487 		if (ret) {
1488 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1489 			/*
1490 			 * Freeing if we are stopping or put back on the
1491 			 * addr_wq.
1492 			 */
1493 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1494 				sctp_asconf_iterator_end(asc, 0);
1495 			} else {
1496 				SCTP_WQ_ADDR_LOCK();
1497 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1498 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1499 				}
1500 				SCTP_WQ_ADDR_UNLOCK();
1501 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1502 			}
1503 		}
1504 	}
1505 }
1506 
1507 void
1508 sctp_timeout_handler(void *t)
1509 {
1510 	struct sctp_inpcb *inp;
1511 	struct sctp_tcb *stcb;
1512 	struct sctp_nets *net;
1513 	struct sctp_timer *tmr;
1514 	struct mbuf *op_err;
1515 
1516 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1517 	struct socket *so;
1518 
1519 #endif
1520 	int did_output;
1521 	int type;
1522 
1523 	tmr = (struct sctp_timer *)t;
1524 	inp = (struct sctp_inpcb *)tmr->ep;
1525 	stcb = (struct sctp_tcb *)tmr->tcb;
1526 	net = (struct sctp_nets *)tmr->net;
1527 	CURVNET_SET((struct vnet *)tmr->vnet);
1528 	did_output = 1;
1529 
1530 #ifdef SCTP_AUDITING_ENABLED
1531 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1532 	sctp_auditing(3, inp, stcb, net);
1533 #endif
1534 
1535 	/* sanity checks... */
1536 	if (tmr->self != (void *)tmr) {
1537 		/*
1538 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1539 		 * (void *)tmr);
1540 		 */
1541 		CURVNET_RESTORE();
1542 		return;
1543 	}
1544 	tmr->stopped_from = 0xa001;
1545 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1546 		/*
1547 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1548 		 * tmr->type);
1549 		 */
1550 		CURVNET_RESTORE();
1551 		return;
1552 	}
1553 	tmr->stopped_from = 0xa002;
1554 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1555 		CURVNET_RESTORE();
1556 		return;
1557 	}
1558 	/* if this is an iterator timeout, get the struct and clear inp */
1559 	tmr->stopped_from = 0xa003;
1560 	if (inp) {
1561 		SCTP_INP_INCR_REF(inp);
1562 		if ((inp->sctp_socket == NULL) &&
1563 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1564 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1565 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1566 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1567 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1568 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1569 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1570 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1571 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1572 		    ) {
1573 			SCTP_INP_DECR_REF(inp);
1574 			CURVNET_RESTORE();
1575 			return;
1576 		}
1577 	}
1578 	tmr->stopped_from = 0xa004;
1579 	if (stcb) {
1580 		atomic_add_int(&stcb->asoc.refcnt, 1);
1581 		if (stcb->asoc.state == 0) {
1582 			atomic_add_int(&stcb->asoc.refcnt, -1);
1583 			if (inp) {
1584 				SCTP_INP_DECR_REF(inp);
1585 			}
1586 			CURVNET_RESTORE();
1587 			return;
1588 		}
1589 	}
1590 	type = tmr->type;
1591 	tmr->stopped_from = 0xa005;
1592 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1593 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1594 		if (inp) {
1595 			SCTP_INP_DECR_REF(inp);
1596 		}
1597 		if (stcb) {
1598 			atomic_add_int(&stcb->asoc.refcnt, -1);
1599 		}
1600 		CURVNET_RESTORE();
1601 		return;
1602 	}
1603 	tmr->stopped_from = 0xa006;
1604 
1605 	if (stcb) {
1606 		SCTP_TCB_LOCK(stcb);
1607 		atomic_add_int(&stcb->asoc.refcnt, -1);
1608 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1609 		    ((stcb->asoc.state == 0) ||
1610 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1611 			SCTP_TCB_UNLOCK(stcb);
1612 			if (inp) {
1613 				SCTP_INP_DECR_REF(inp);
1614 			}
1615 			CURVNET_RESTORE();
1616 			return;
1617 		}
1618 	}
1619 	/* record in stopped what t-o occurred */
1620 	tmr->stopped_from = type;
1621 
1622 	/* mark as being serviced now */
1623 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1624 		/*
1625 		 * Callout has been rescheduled.
1626 		 */
1627 		goto get_out;
1628 	}
1629 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1630 		/*
1631 		 * Not active, so no action.
1632 		 */
1633 		goto get_out;
1634 	}
1635 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1636 
1637 	/* call the handler for the appropriate timer type */
1638 	switch (type) {
1639 	case SCTP_TIMER_TYPE_ZERO_COPY:
1640 		if (inp == NULL) {
1641 			break;
1642 		}
1643 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1644 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1645 		}
1646 		break;
1647 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1648 		if (inp == NULL) {
1649 			break;
1650 		}
1651 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1652 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1653 		}
1654 		break;
1655 	case SCTP_TIMER_TYPE_ADDR_WQ:
1656 		sctp_handle_addr_wq();
1657 		break;
1658 	case SCTP_TIMER_TYPE_SEND:
1659 		if ((stcb == NULL) || (inp == NULL)) {
1660 			break;
1661 		}
1662 		SCTP_STAT_INCR(sctps_timodata);
1663 		stcb->asoc.timodata++;
1664 		stcb->asoc.num_send_timers_up--;
1665 		if (stcb->asoc.num_send_timers_up < 0) {
1666 			stcb->asoc.num_send_timers_up = 0;
1667 		}
1668 		SCTP_TCB_LOCK_ASSERT(stcb);
1669 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1670 			/* no need to unlock on tcb its gone */
1671 
1672 			goto out_decr;
1673 		}
1674 		SCTP_TCB_LOCK_ASSERT(stcb);
1675 #ifdef SCTP_AUDITING_ENABLED
1676 		sctp_auditing(4, inp, stcb, net);
1677 #endif
1678 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1679 		if ((stcb->asoc.num_send_timers_up == 0) &&
1680 		    (stcb->asoc.sent_queue_cnt > 0)) {
1681 			struct sctp_tmit_chunk *chk;
1682 
1683 			/*
1684 			 * safeguard. If there on some on the sent queue
1685 			 * somewhere but no timers running something is
1686 			 * wrong... so we start a timer on the first chunk
1687 			 * on the send queue on whatever net it is sent to.
1688 			 */
1689 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1690 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1691 			    chk->whoTo);
1692 		}
1693 		break;
1694 	case SCTP_TIMER_TYPE_INIT:
1695 		if ((stcb == NULL) || (inp == NULL)) {
1696 			break;
1697 		}
1698 		SCTP_STAT_INCR(sctps_timoinit);
1699 		stcb->asoc.timoinit++;
1700 		if (sctp_t1init_timer(inp, stcb, net)) {
1701 			/* no need to unlock on tcb its gone */
1702 			goto out_decr;
1703 		}
1704 		/* We do output but not here */
1705 		did_output = 0;
1706 		break;
1707 	case SCTP_TIMER_TYPE_RECV:
1708 		if ((stcb == NULL) || (inp == NULL)) {
1709 			break;
1710 		}
1711 		SCTP_STAT_INCR(sctps_timosack);
1712 		stcb->asoc.timosack++;
1713 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1714 #ifdef SCTP_AUDITING_ENABLED
1715 		sctp_auditing(4, inp, stcb, net);
1716 #endif
1717 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1718 		break;
1719 	case SCTP_TIMER_TYPE_SHUTDOWN:
1720 		if ((stcb == NULL) || (inp == NULL)) {
1721 			break;
1722 		}
1723 		if (sctp_shutdown_timer(inp, stcb, net)) {
1724 			/* no need to unlock on tcb its gone */
1725 			goto out_decr;
1726 		}
1727 		SCTP_STAT_INCR(sctps_timoshutdown);
1728 		stcb->asoc.timoshutdown++;
1729 #ifdef SCTP_AUDITING_ENABLED
1730 		sctp_auditing(4, inp, stcb, net);
1731 #endif
1732 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1733 		break;
1734 	case SCTP_TIMER_TYPE_HEARTBEAT:
1735 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1736 			break;
1737 		}
1738 		SCTP_STAT_INCR(sctps_timoheartbeat);
1739 		stcb->asoc.timoheartbeat++;
1740 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1741 			/* no need to unlock on tcb its gone */
1742 			goto out_decr;
1743 		}
1744 #ifdef SCTP_AUDITING_ENABLED
1745 		sctp_auditing(4, inp, stcb, net);
1746 #endif
1747 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1748 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1749 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1750 		}
1751 		break;
1752 	case SCTP_TIMER_TYPE_COOKIE:
1753 		if ((stcb == NULL) || (inp == NULL)) {
1754 			break;
1755 		}
1756 		if (sctp_cookie_timer(inp, stcb, net)) {
1757 			/* no need to unlock on tcb its gone */
1758 			goto out_decr;
1759 		}
1760 		SCTP_STAT_INCR(sctps_timocookie);
1761 		stcb->asoc.timocookie++;
1762 #ifdef SCTP_AUDITING_ENABLED
1763 		sctp_auditing(4, inp, stcb, net);
1764 #endif
1765 		/*
1766 		 * We consider T3 and Cookie timer pretty much the same with
1767 		 * respect to where from in chunk_output.
1768 		 */
1769 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1770 		break;
1771 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1772 		{
1773 			struct timeval tv;
1774 			int i, secret;
1775 
1776 			if (inp == NULL) {
1777 				break;
1778 			}
1779 			SCTP_STAT_INCR(sctps_timosecret);
1780 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1781 			SCTP_INP_WLOCK(inp);
1782 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1783 			inp->sctp_ep.last_secret_number =
1784 			    inp->sctp_ep.current_secret_number;
1785 			inp->sctp_ep.current_secret_number++;
1786 			if (inp->sctp_ep.current_secret_number >=
1787 			    SCTP_HOW_MANY_SECRETS) {
1788 				inp->sctp_ep.current_secret_number = 0;
1789 			}
1790 			secret = (int)inp->sctp_ep.current_secret_number;
1791 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1792 				inp->sctp_ep.secret_key[secret][i] =
1793 				    sctp_select_initial_TSN(&inp->sctp_ep);
1794 			}
1795 			SCTP_INP_WUNLOCK(inp);
1796 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1797 		}
1798 		did_output = 0;
1799 		break;
1800 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1801 		if ((stcb == NULL) || (inp == NULL)) {
1802 			break;
1803 		}
1804 		SCTP_STAT_INCR(sctps_timopathmtu);
1805 		sctp_pathmtu_timer(inp, stcb, net);
1806 		did_output = 0;
1807 		break;
1808 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1809 		if ((stcb == NULL) || (inp == NULL)) {
1810 			break;
1811 		}
1812 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1813 			/* no need to unlock on tcb its gone */
1814 			goto out_decr;
1815 		}
1816 		SCTP_STAT_INCR(sctps_timoshutdownack);
1817 		stcb->asoc.timoshutdownack++;
1818 #ifdef SCTP_AUDITING_ENABLED
1819 		sctp_auditing(4, inp, stcb, net);
1820 #endif
1821 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1822 		break;
1823 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1824 		if ((stcb == NULL) || (inp == NULL)) {
1825 			break;
1826 		}
1827 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1828 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1829 		    "Shutdown guard timer expired");
1830 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1831 		/* no need to unlock on tcb its gone */
1832 		goto out_decr;
1833 
1834 	case SCTP_TIMER_TYPE_STRRESET:
1835 		if ((stcb == NULL) || (inp == NULL)) {
1836 			break;
1837 		}
1838 		if (sctp_strreset_timer(inp, stcb, net)) {
1839 			/* no need to unlock on tcb its gone */
1840 			goto out_decr;
1841 		}
1842 		SCTP_STAT_INCR(sctps_timostrmrst);
1843 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1844 		break;
1845 	case SCTP_TIMER_TYPE_ASCONF:
1846 		if ((stcb == NULL) || (inp == NULL)) {
1847 			break;
1848 		}
1849 		if (sctp_asconf_timer(inp, stcb, net)) {
1850 			/* no need to unlock on tcb its gone */
1851 			goto out_decr;
1852 		}
1853 		SCTP_STAT_INCR(sctps_timoasconf);
1854 #ifdef SCTP_AUDITING_ENABLED
1855 		sctp_auditing(4, inp, stcb, net);
1856 #endif
1857 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1858 		break;
1859 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1860 		if ((stcb == NULL) || (inp == NULL)) {
1861 			break;
1862 		}
1863 		sctp_delete_prim_timer(inp, stcb, net);
1864 		SCTP_STAT_INCR(sctps_timodelprim);
1865 		break;
1866 
1867 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1868 		if ((stcb == NULL) || (inp == NULL)) {
1869 			break;
1870 		}
1871 		SCTP_STAT_INCR(sctps_timoautoclose);
1872 		sctp_autoclose_timer(inp, stcb, net);
1873 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1874 		did_output = 0;
1875 		break;
1876 	case SCTP_TIMER_TYPE_ASOCKILL:
1877 		if ((stcb == NULL) || (inp == NULL)) {
1878 			break;
1879 		}
1880 		SCTP_STAT_INCR(sctps_timoassockill);
1881 		/* Can we free it yet? */
1882 		SCTP_INP_DECR_REF(inp);
1883 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1884 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1885 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1886 		so = SCTP_INP_SO(inp);
1887 		atomic_add_int(&stcb->asoc.refcnt, 1);
1888 		SCTP_TCB_UNLOCK(stcb);
1889 		SCTP_SOCKET_LOCK(so, 1);
1890 		SCTP_TCB_LOCK(stcb);
1891 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1892 #endif
1893 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1894 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1895 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1896 		SCTP_SOCKET_UNLOCK(so, 1);
1897 #endif
1898 		/*
1899 		 * free asoc, always unlocks (or destroy's) so prevent
1900 		 * duplicate unlock or unlock of a free mtx :-0
1901 		 */
1902 		stcb = NULL;
1903 		goto out_no_decr;
1904 	case SCTP_TIMER_TYPE_INPKILL:
1905 		SCTP_STAT_INCR(sctps_timoinpkill);
1906 		if (inp == NULL) {
1907 			break;
1908 		}
1909 		/*
1910 		 * special case, take away our increment since WE are the
1911 		 * killer
1912 		 */
1913 		SCTP_INP_DECR_REF(inp);
1914 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1915 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1916 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1917 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1918 		inp = NULL;
1919 		goto out_no_decr;
1920 	default:
1921 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1922 		    type);
1923 		break;
1924 	}
1925 #ifdef SCTP_AUDITING_ENABLED
1926 	sctp_audit_log(0xF1, (uint8_t) type);
1927 	if (inp)
1928 		sctp_auditing(5, inp, stcb, net);
1929 #endif
1930 	if ((did_output) && stcb) {
1931 		/*
1932 		 * Now we need to clean up the control chunk chain if an
1933 		 * ECNE is on it. It must be marked as UNSENT again so next
1934 		 * call will continue to send it until such time that we get
1935 		 * a CWR, to remove it. It is, however, less likely that we
1936 		 * will find a ecn echo on the chain though.
1937 		 */
1938 		sctp_fix_ecn_echo(&stcb->asoc);
1939 	}
1940 get_out:
1941 	if (stcb) {
1942 		SCTP_TCB_UNLOCK(stcb);
1943 	}
1944 out_decr:
1945 	if (inp) {
1946 		SCTP_INP_DECR_REF(inp);
1947 	}
1948 out_no_decr:
1949 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
1950 	CURVNET_RESTORE();
1951 }
1952 
1953 void
1954 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1955     struct sctp_nets *net)
1956 {
1957 	uint32_t to_ticks;
1958 	struct sctp_timer *tmr;
1959 
1960 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1961 		return;
1962 
1963 	tmr = NULL;
1964 	if (stcb) {
1965 		SCTP_TCB_LOCK_ASSERT(stcb);
1966 	}
1967 	switch (t_type) {
1968 	case SCTP_TIMER_TYPE_ZERO_COPY:
1969 		tmr = &inp->sctp_ep.zero_copy_timer;
1970 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1971 		break;
1972 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1973 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1974 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1975 		break;
1976 	case SCTP_TIMER_TYPE_ADDR_WQ:
1977 		/* Only 1 tick away :-) */
1978 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1979 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1980 		break;
1981 	case SCTP_TIMER_TYPE_SEND:
1982 		/* Here we use the RTO timer */
1983 		{
1984 			int rto_val;
1985 
1986 			if ((stcb == NULL) || (net == NULL)) {
1987 				return;
1988 			}
1989 			tmr = &net->rxt_timer;
1990 			if (net->RTO == 0) {
1991 				rto_val = stcb->asoc.initial_rto;
1992 			} else {
1993 				rto_val = net->RTO;
1994 			}
1995 			to_ticks = MSEC_TO_TICKS(rto_val);
1996 		}
1997 		break;
1998 	case SCTP_TIMER_TYPE_INIT:
1999 		/*
2000 		 * Here we use the INIT timer default usually about 1
2001 		 * minute.
2002 		 */
2003 		if ((stcb == NULL) || (net == NULL)) {
2004 			return;
2005 		}
2006 		tmr = &net->rxt_timer;
2007 		if (net->RTO == 0) {
2008 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2009 		} else {
2010 			to_ticks = MSEC_TO_TICKS(net->RTO);
2011 		}
2012 		break;
2013 	case SCTP_TIMER_TYPE_RECV:
2014 		/*
2015 		 * Here we use the Delayed-Ack timer value from the inp
2016 		 * ususually about 200ms.
2017 		 */
2018 		if (stcb == NULL) {
2019 			return;
2020 		}
2021 		tmr = &stcb->asoc.dack_timer;
2022 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2023 		break;
2024 	case SCTP_TIMER_TYPE_SHUTDOWN:
2025 		/* Here we use the RTO of the destination. */
2026 		if ((stcb == NULL) || (net == NULL)) {
2027 			return;
2028 		}
2029 		if (net->RTO == 0) {
2030 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2031 		} else {
2032 			to_ticks = MSEC_TO_TICKS(net->RTO);
2033 		}
2034 		tmr = &net->rxt_timer;
2035 		break;
2036 	case SCTP_TIMER_TYPE_HEARTBEAT:
2037 		/*
2038 		 * the net is used here so that we can add in the RTO. Even
2039 		 * though we use a different timer. We also add the HB timer
2040 		 * PLUS a random jitter.
2041 		 */
2042 		if ((stcb == NULL) || (net == NULL)) {
2043 			return;
2044 		} else {
2045 			uint32_t rndval;
2046 			uint32_t jitter;
2047 
2048 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2049 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2050 				return;
2051 			}
2052 			if (net->RTO == 0) {
2053 				to_ticks = stcb->asoc.initial_rto;
2054 			} else {
2055 				to_ticks = net->RTO;
2056 			}
2057 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2058 			jitter = rndval % to_ticks;
2059 			if (jitter >= (to_ticks >> 1)) {
2060 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2061 			} else {
2062 				to_ticks = to_ticks - jitter;
2063 			}
2064 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2065 			    !(net->dest_state & SCTP_ADDR_PF)) {
2066 				to_ticks += net->heart_beat_delay;
2067 			}
2068 			/*
2069 			 * Now we must convert the to_ticks that are now in
2070 			 * ms to ticks.
2071 			 */
2072 			to_ticks = MSEC_TO_TICKS(to_ticks);
2073 			tmr = &net->hb_timer;
2074 		}
2075 		break;
2076 	case SCTP_TIMER_TYPE_COOKIE:
2077 		/*
2078 		 * Here we can use the RTO timer from the network since one
2079 		 * RTT was compelete. If a retran happened then we will be
2080 		 * using the RTO initial value.
2081 		 */
2082 		if ((stcb == NULL) || (net == NULL)) {
2083 			return;
2084 		}
2085 		if (net->RTO == 0) {
2086 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2087 		} else {
2088 			to_ticks = MSEC_TO_TICKS(net->RTO);
2089 		}
2090 		tmr = &net->rxt_timer;
2091 		break;
2092 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2093 		/*
2094 		 * nothing needed but the endpoint here ususually about 60
2095 		 * minutes.
2096 		 */
2097 		tmr = &inp->sctp_ep.signature_change;
2098 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2099 		break;
2100 	case SCTP_TIMER_TYPE_ASOCKILL:
2101 		if (stcb == NULL) {
2102 			return;
2103 		}
2104 		tmr = &stcb->asoc.strreset_timer;
2105 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2106 		break;
2107 	case SCTP_TIMER_TYPE_INPKILL:
2108 		/*
2109 		 * The inp is setup to die. We re-use the signature_chage
2110 		 * timer since that has stopped and we are in the GONE
2111 		 * state.
2112 		 */
2113 		tmr = &inp->sctp_ep.signature_change;
2114 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2115 		break;
2116 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2117 		/*
2118 		 * Here we use the value found in the EP for PMTU ususually
2119 		 * about 10 minutes.
2120 		 */
2121 		if ((stcb == NULL) || (net == NULL)) {
2122 			return;
2123 		}
2124 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2125 			return;
2126 		}
2127 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2128 		tmr = &net->pmtu_timer;
2129 		break;
2130 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2131 		/* Here we use the RTO of the destination */
2132 		if ((stcb == NULL) || (net == NULL)) {
2133 			return;
2134 		}
2135 		if (net->RTO == 0) {
2136 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2137 		} else {
2138 			to_ticks = MSEC_TO_TICKS(net->RTO);
2139 		}
2140 		tmr = &net->rxt_timer;
2141 		break;
2142 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2143 		/*
2144 		 * Here we use the endpoints shutdown guard timer usually
2145 		 * about 3 minutes.
2146 		 */
2147 		if (stcb == NULL) {
2148 			return;
2149 		}
2150 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2151 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2152 		} else {
2153 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2154 		}
2155 		tmr = &stcb->asoc.shut_guard_timer;
2156 		break;
2157 	case SCTP_TIMER_TYPE_STRRESET:
2158 		/*
2159 		 * Here the timer comes from the stcb but its value is from
2160 		 * the net's RTO.
2161 		 */
2162 		if ((stcb == NULL) || (net == NULL)) {
2163 			return;
2164 		}
2165 		if (net->RTO == 0) {
2166 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2167 		} else {
2168 			to_ticks = MSEC_TO_TICKS(net->RTO);
2169 		}
2170 		tmr = &stcb->asoc.strreset_timer;
2171 		break;
2172 	case SCTP_TIMER_TYPE_ASCONF:
2173 		/*
2174 		 * Here the timer comes from the stcb but its value is from
2175 		 * the net's RTO.
2176 		 */
2177 		if ((stcb == NULL) || (net == NULL)) {
2178 			return;
2179 		}
2180 		if (net->RTO == 0) {
2181 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2182 		} else {
2183 			to_ticks = MSEC_TO_TICKS(net->RTO);
2184 		}
2185 		tmr = &stcb->asoc.asconf_timer;
2186 		break;
2187 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2188 		if ((stcb == NULL) || (net != NULL)) {
2189 			return;
2190 		}
2191 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2192 		tmr = &stcb->asoc.delete_prim_timer;
2193 		break;
2194 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2195 		if (stcb == NULL) {
2196 			return;
2197 		}
2198 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2199 			/*
2200 			 * Really an error since stcb is NOT set to
2201 			 * autoclose
2202 			 */
2203 			return;
2204 		}
2205 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2206 		tmr = &stcb->asoc.autoclose_timer;
2207 		break;
2208 	default:
2209 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2210 		    __func__, t_type);
2211 		return;
2212 		break;
2213 	}
2214 	if ((to_ticks <= 0) || (tmr == NULL)) {
2215 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2216 		    __func__, t_type, to_ticks, (void *)tmr);
2217 		return;
2218 	}
2219 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2220 		/*
2221 		 * we do NOT allow you to have it already running. if it is
2222 		 * we leave the current one up unchanged
2223 		 */
2224 		return;
2225 	}
2226 	/* At this point we can proceed */
2227 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2228 		stcb->asoc.num_send_timers_up++;
2229 	}
2230 	tmr->stopped_from = 0;
2231 	tmr->type = t_type;
2232 	tmr->ep = (void *)inp;
2233 	tmr->tcb = (void *)stcb;
2234 	tmr->net = (void *)net;
2235 	tmr->self = (void *)tmr;
2236 	tmr->vnet = (void *)curvnet;
2237 	tmr->ticks = sctp_get_tick_count();
2238 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2239 	return;
2240 }
2241 
2242 void
2243 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2244     struct sctp_nets *net, uint32_t from)
2245 {
2246 	struct sctp_timer *tmr;
2247 
2248 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2249 	    (inp == NULL))
2250 		return;
2251 
2252 	tmr = NULL;
2253 	if (stcb) {
2254 		SCTP_TCB_LOCK_ASSERT(stcb);
2255 	}
2256 	switch (t_type) {
2257 	case SCTP_TIMER_TYPE_ZERO_COPY:
2258 		tmr = &inp->sctp_ep.zero_copy_timer;
2259 		break;
2260 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2261 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2262 		break;
2263 	case SCTP_TIMER_TYPE_ADDR_WQ:
2264 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2265 		break;
2266 	case SCTP_TIMER_TYPE_SEND:
2267 		if ((stcb == NULL) || (net == NULL)) {
2268 			return;
2269 		}
2270 		tmr = &net->rxt_timer;
2271 		break;
2272 	case SCTP_TIMER_TYPE_INIT:
2273 		if ((stcb == NULL) || (net == NULL)) {
2274 			return;
2275 		}
2276 		tmr = &net->rxt_timer;
2277 		break;
2278 	case SCTP_TIMER_TYPE_RECV:
2279 		if (stcb == NULL) {
2280 			return;
2281 		}
2282 		tmr = &stcb->asoc.dack_timer;
2283 		break;
2284 	case SCTP_TIMER_TYPE_SHUTDOWN:
2285 		if ((stcb == NULL) || (net == NULL)) {
2286 			return;
2287 		}
2288 		tmr = &net->rxt_timer;
2289 		break;
2290 	case SCTP_TIMER_TYPE_HEARTBEAT:
2291 		if ((stcb == NULL) || (net == NULL)) {
2292 			return;
2293 		}
2294 		tmr = &net->hb_timer;
2295 		break;
2296 	case SCTP_TIMER_TYPE_COOKIE:
2297 		if ((stcb == NULL) || (net == NULL)) {
2298 			return;
2299 		}
2300 		tmr = &net->rxt_timer;
2301 		break;
2302 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2303 		/* nothing needed but the endpoint here */
2304 		tmr = &inp->sctp_ep.signature_change;
2305 		/*
2306 		 * We re-use the newcookie timer for the INP kill timer. We
2307 		 * must assure that we do not kill it by accident.
2308 		 */
2309 		break;
2310 	case SCTP_TIMER_TYPE_ASOCKILL:
2311 		/*
2312 		 * Stop the asoc kill timer.
2313 		 */
2314 		if (stcb == NULL) {
2315 			return;
2316 		}
2317 		tmr = &stcb->asoc.strreset_timer;
2318 		break;
2319 
2320 	case SCTP_TIMER_TYPE_INPKILL:
2321 		/*
2322 		 * The inp is setup to die. We re-use the signature_chage
2323 		 * timer since that has stopped and we are in the GONE
2324 		 * state.
2325 		 */
2326 		tmr = &inp->sctp_ep.signature_change;
2327 		break;
2328 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2329 		if ((stcb == NULL) || (net == NULL)) {
2330 			return;
2331 		}
2332 		tmr = &net->pmtu_timer;
2333 		break;
2334 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2335 		if ((stcb == NULL) || (net == NULL)) {
2336 			return;
2337 		}
2338 		tmr = &net->rxt_timer;
2339 		break;
2340 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2341 		if (stcb == NULL) {
2342 			return;
2343 		}
2344 		tmr = &stcb->asoc.shut_guard_timer;
2345 		break;
2346 	case SCTP_TIMER_TYPE_STRRESET:
2347 		if (stcb == NULL) {
2348 			return;
2349 		}
2350 		tmr = &stcb->asoc.strreset_timer;
2351 		break;
2352 	case SCTP_TIMER_TYPE_ASCONF:
2353 		if (stcb == NULL) {
2354 			return;
2355 		}
2356 		tmr = &stcb->asoc.asconf_timer;
2357 		break;
2358 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2359 		if (stcb == NULL) {
2360 			return;
2361 		}
2362 		tmr = &stcb->asoc.delete_prim_timer;
2363 		break;
2364 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2365 		if (stcb == NULL) {
2366 			return;
2367 		}
2368 		tmr = &stcb->asoc.autoclose_timer;
2369 		break;
2370 	default:
2371 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2372 		    __func__, t_type);
2373 		break;
2374 	}
2375 	if (tmr == NULL) {
2376 		return;
2377 	}
2378 	if ((tmr->type != t_type) && tmr->type) {
2379 		/*
2380 		 * Ok we have a timer that is under joint use. Cookie timer
2381 		 * per chance with the SEND timer. We therefore are NOT
2382 		 * running the timer that the caller wants stopped.  So just
2383 		 * return.
2384 		 */
2385 		return;
2386 	}
2387 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2388 		stcb->asoc.num_send_timers_up--;
2389 		if (stcb->asoc.num_send_timers_up < 0) {
2390 			stcb->asoc.num_send_timers_up = 0;
2391 		}
2392 	}
2393 	tmr->self = NULL;
2394 	tmr->stopped_from = from;
2395 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2396 	return;
2397 }
2398 
2399 uint32_t
2400 sctp_calculate_len(struct mbuf *m)
2401 {
2402 	uint32_t tlen = 0;
2403 	struct mbuf *at;
2404 
2405 	at = m;
2406 	while (at) {
2407 		tlen += SCTP_BUF_LEN(at);
2408 		at = SCTP_BUF_NEXT(at);
2409 	}
2410 	return (tlen);
2411 }
2412 
2413 void
2414 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2415     struct sctp_association *asoc, uint32_t mtu)
2416 {
2417 	/*
2418 	 * Reset the P-MTU size on this association, this involves changing
2419 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2420 	 * allow the DF flag to be cleared.
2421 	 */
2422 	struct sctp_tmit_chunk *chk;
2423 	unsigned int eff_mtu, ovh;
2424 
2425 	asoc->smallest_mtu = mtu;
2426 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2427 		ovh = SCTP_MIN_OVERHEAD;
2428 	} else {
2429 		ovh = SCTP_MIN_V4_OVERHEAD;
2430 	}
2431 	eff_mtu = mtu - ovh;
2432 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2433 		if (chk->send_size > eff_mtu) {
2434 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2435 		}
2436 	}
2437 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2438 		if (chk->send_size > eff_mtu) {
2439 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2440 		}
2441 	}
2442 }
2443 
2444 
2445 /*
2446  * given an association and starting time of the current RTT period return
2447  * RTO in number of msecs net should point to the current network
2448  */
2449 
2450 uint32_t
2451 sctp_calculate_rto(struct sctp_tcb *stcb,
2452     struct sctp_association *asoc,
2453     struct sctp_nets *net,
2454     struct timeval *told,
2455     int safe, int rtt_from_sack)
2456 {
2457 	/*-
2458 	 * given an association and the starting time of the current RTT
2459 	 * period (in value1/value2) return RTO in number of msecs.
2460 	 */
2461 	int32_t rtt;		/* RTT in ms */
2462 	uint32_t new_rto;
2463 	int first_measure = 0;
2464 	struct timeval now, then, *old;
2465 
2466 	/* Copy it out for sparc64 */
2467 	if (safe == sctp_align_unsafe_makecopy) {
2468 		old = &then;
2469 		memcpy(&then, told, sizeof(struct timeval));
2470 	} else if (safe == sctp_align_safe_nocopy) {
2471 		old = told;
2472 	} else {
2473 		/* error */
2474 		SCTP_PRINTF("Huh, bad rto calc call\n");
2475 		return (0);
2476 	}
2477 	/************************/
2478 	/* 1. calculate new RTT */
2479 	/************************/
2480 	/* get the current time */
2481 	if (stcb->asoc.use_precise_time) {
2482 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2483 	} else {
2484 		(void)SCTP_GETTIME_TIMEVAL(&now);
2485 	}
2486 	timevalsub(&now, old);
2487 	/* store the current RTT in us */
2488 	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
2489 	        (uint64_t) now.tv_usec;
2490 
2491 	/* compute rtt in ms */
2492 	rtt = (int32_t) (net->rtt / 1000);
2493 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2494 		/*
2495 		 * Tell the CC module that a new update has just occurred
2496 		 * from a sack
2497 		 */
2498 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2499 	}
2500 	/*
2501 	 * Do we need to determine the lan? We do this only on sacks i.e.
2502 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2503 	 */
2504 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2505 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2506 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2507 			net->lan_type = SCTP_LAN_INTERNET;
2508 		} else {
2509 			net->lan_type = SCTP_LAN_LOCAL;
2510 		}
2511 	}
2512 	/***************************/
2513 	/* 2. update RTTVAR & SRTT */
2514 	/***************************/
2515 	/*-
2516 	 * Compute the scaled average lastsa and the
2517 	 * scaled variance lastsv as described in van Jacobson
2518 	 * Paper "Congestion Avoidance and Control", Annex A.
2519 	 *
2520 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2521 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2522 	 */
2523 	if (net->RTO_measured) {
2524 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2525 		net->lastsa += rtt;
2526 		if (rtt < 0) {
2527 			rtt = -rtt;
2528 		}
2529 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2530 		net->lastsv += rtt;
2531 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2532 			rto_logging(net, SCTP_LOG_RTTVAR);
2533 		}
2534 	} else {
2535 		/* First RTO measurment */
2536 		net->RTO_measured = 1;
2537 		first_measure = 1;
2538 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2539 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2540 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2541 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2542 		}
2543 	}
2544 	if (net->lastsv == 0) {
2545 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2546 	}
2547 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2548 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2549 	    (stcb->asoc.sat_network_lockout == 0)) {
2550 		stcb->asoc.sat_network = 1;
2551 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2552 		stcb->asoc.sat_network = 0;
2553 		stcb->asoc.sat_network_lockout = 1;
2554 	}
2555 	/* bound it, per C6/C7 in Section 5.3.1 */
2556 	if (new_rto < stcb->asoc.minrto) {
2557 		new_rto = stcb->asoc.minrto;
2558 	}
2559 	if (new_rto > stcb->asoc.maxrto) {
2560 		new_rto = stcb->asoc.maxrto;
2561 	}
2562 	/* we are now returning the RTO */
2563 	return (new_rto);
2564 }
2565 
2566 /*
2567  * return a pointer to a contiguous piece of data from the given mbuf chain
2568  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2569  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2570  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2571  */
2572 caddr_t
2573 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2574 {
2575 	uint32_t count;
2576 	uint8_t *ptr;
2577 
2578 	ptr = in_ptr;
2579 	if ((off < 0) || (len <= 0))
2580 		return (NULL);
2581 
2582 	/* find the desired start location */
2583 	while ((m != NULL) && (off > 0)) {
2584 		if (off < SCTP_BUF_LEN(m))
2585 			break;
2586 		off -= SCTP_BUF_LEN(m);
2587 		m = SCTP_BUF_NEXT(m);
2588 	}
2589 	if (m == NULL)
2590 		return (NULL);
2591 
2592 	/* is the current mbuf large enough (eg. contiguous)? */
2593 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2594 		return (mtod(m, caddr_t)+off);
2595 	} else {
2596 		/* else, it spans more than one mbuf, so save a temp copy... */
2597 		while ((m != NULL) && (len > 0)) {
2598 			count = min(SCTP_BUF_LEN(m) - off, len);
2599 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2600 			len -= count;
2601 			ptr += count;
2602 			off = 0;
2603 			m = SCTP_BUF_NEXT(m);
2604 		}
2605 		if ((m == NULL) && (len > 0))
2606 			return (NULL);
2607 		else
2608 			return ((caddr_t)in_ptr);
2609 	}
2610 }
2611 
2612 
2613 
2614 struct sctp_paramhdr *
2615 sctp_get_next_param(struct mbuf *m,
2616     int offset,
2617     struct sctp_paramhdr *pull,
2618     int pull_limit)
2619 {
2620 	/* This just provides a typed signature to Peter's Pull routine */
2621 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2622 	    (uint8_t *) pull));
2623 }
2624 
2625 
2626 struct mbuf *
2627 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2628 {
2629 	struct mbuf *m_last;
2630 	caddr_t dp;
2631 
2632 	if (padlen > 3) {
2633 		return (NULL);
2634 	}
2635 	if (padlen <= M_TRAILINGSPACE(m)) {
2636 		/*
2637 		 * The easy way. We hope the majority of the time we hit
2638 		 * here :)
2639 		 */
2640 		m_last = m;
2641 	} else {
2642 		/* Hard way we must grow the mbuf chain */
2643 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2644 		if (m_last == NULL) {
2645 			return (NULL);
2646 		}
2647 		SCTP_BUF_LEN(m_last) = 0;
2648 		SCTP_BUF_NEXT(m_last) = NULL;
2649 		SCTP_BUF_NEXT(m) = m_last;
2650 	}
2651 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2652 	SCTP_BUF_LEN(m_last) += padlen;
2653 	memset(dp, 0, padlen);
2654 	return (m_last);
2655 }
2656 
2657 struct mbuf *
2658 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2659 {
2660 	/* find the last mbuf in chain and pad it */
2661 	struct mbuf *m_at;
2662 
2663 	if (last_mbuf != NULL) {
2664 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2665 	} else {
2666 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2667 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2668 				return (sctp_add_pad_tombuf(m_at, padval));
2669 			}
2670 		}
2671 	}
2672 	return (NULL);
2673 }
2674 
2675 static void
2676 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2677     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2678 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2679     SCTP_UNUSED
2680 #endif
2681 )
2682 {
2683 	struct mbuf *m_notify;
2684 	struct sctp_assoc_change *sac;
2685 	struct sctp_queued_to_read *control;
2686 	unsigned int notif_len;
2687 	uint16_t abort_len;
2688 	unsigned int i;
2689 
2690 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2691 	struct socket *so;
2692 
2693 #endif
2694 
2695 	if (stcb == NULL) {
2696 		return;
2697 	}
2698 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2699 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2700 		if (abort != NULL) {
2701 			abort_len = ntohs(abort->ch.chunk_length);
2702 		} else {
2703 			abort_len = 0;
2704 		}
2705 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2706 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2707 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2708 			notif_len += abort_len;
2709 		}
2710 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2711 		if (m_notify == NULL) {
2712 			/* Retry with smaller value. */
2713 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2714 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2715 			if (m_notify == NULL) {
2716 				goto set_error;
2717 			}
2718 		}
2719 		SCTP_BUF_NEXT(m_notify) = NULL;
2720 		sac = mtod(m_notify, struct sctp_assoc_change *);
2721 		memset(sac, 0, notif_len);
2722 		sac->sac_type = SCTP_ASSOC_CHANGE;
2723 		sac->sac_flags = 0;
2724 		sac->sac_length = sizeof(struct sctp_assoc_change);
2725 		sac->sac_state = state;
2726 		sac->sac_error = error;
2727 		/* XXX verify these stream counts */
2728 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2729 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2730 		sac->sac_assoc_id = sctp_get_associd(stcb);
2731 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2732 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2733 				i = 0;
2734 				if (stcb->asoc.prsctp_supported == 1) {
2735 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2736 				}
2737 				if (stcb->asoc.auth_supported == 1) {
2738 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2739 				}
2740 				if (stcb->asoc.asconf_supported == 1) {
2741 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2742 				}
2743 				if (stcb->asoc.idata_supported == 1) {
2744 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2745 				}
2746 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2747 				if (stcb->asoc.reconfig_supported == 1) {
2748 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2749 				}
2750 				sac->sac_length += i;
2751 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2752 				memcpy(sac->sac_info, abort, abort_len);
2753 				sac->sac_length += abort_len;
2754 			}
2755 		}
2756 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2757 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2758 		    0, 0, stcb->asoc.context, 0, 0, 0,
2759 		    m_notify);
2760 		if (control != NULL) {
2761 			control->length = SCTP_BUF_LEN(m_notify);
2762 			/* not that we need this */
2763 			control->tail_mbuf = m_notify;
2764 			control->spec_flags = M_NOTIFICATION;
2765 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2766 			    control,
2767 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2768 			    so_locked);
2769 		} else {
2770 			sctp_m_freem(m_notify);
2771 		}
2772 	}
2773 	/*
2774 	 * For 1-to-1 style sockets, we send up and error when an ABORT
2775 	 * comes in.
2776 	 */
2777 set_error:
2778 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2779 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2780 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2781 		SOCK_LOCK(stcb->sctp_socket);
2782 		if (from_peer) {
2783 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2784 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2785 				stcb->sctp_socket->so_error = ECONNREFUSED;
2786 			} else {
2787 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2788 				stcb->sctp_socket->so_error = ECONNRESET;
2789 			}
2790 		} else {
2791 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2792 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2793 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2794 				stcb->sctp_socket->so_error = ETIMEDOUT;
2795 			} else {
2796 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2797 				stcb->sctp_socket->so_error = ECONNABORTED;
2798 			}
2799 		}
2800 	}
2801 	/* Wake ANY sleepers */
2802 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2803 	so = SCTP_INP_SO(stcb->sctp_ep);
2804 	if (!so_locked) {
2805 		atomic_add_int(&stcb->asoc.refcnt, 1);
2806 		SCTP_TCB_UNLOCK(stcb);
2807 		SCTP_SOCKET_LOCK(so, 1);
2808 		SCTP_TCB_LOCK(stcb);
2809 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2810 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2811 			SCTP_SOCKET_UNLOCK(so, 1);
2812 			return;
2813 		}
2814 	}
2815 #endif
2816 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2817 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2818 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2819 		socantrcvmore_locked(stcb->sctp_socket);
2820 	}
2821 	sorwakeup(stcb->sctp_socket);
2822 	sowwakeup(stcb->sctp_socket);
2823 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2824 	if (!so_locked) {
2825 		SCTP_SOCKET_UNLOCK(so, 1);
2826 	}
2827 #endif
2828 }
2829 
2830 static void
2831 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2832     struct sockaddr *sa, uint32_t error, int so_locked
2833 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2834     SCTP_UNUSED
2835 #endif
2836 )
2837 {
2838 	struct mbuf *m_notify;
2839 	struct sctp_paddr_change *spc;
2840 	struct sctp_queued_to_read *control;
2841 
2842 	if ((stcb == NULL) ||
2843 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2844 		/* event not enabled */
2845 		return;
2846 	}
2847 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2848 	if (m_notify == NULL)
2849 		return;
2850 	SCTP_BUF_LEN(m_notify) = 0;
2851 	spc = mtod(m_notify, struct sctp_paddr_change *);
2852 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2853 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2854 	spc->spc_flags = 0;
2855 	spc->spc_length = sizeof(struct sctp_paddr_change);
2856 	switch (sa->sa_family) {
2857 #ifdef INET
2858 	case AF_INET:
2859 #ifdef INET6
2860 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2861 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2862 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2863 		} else {
2864 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2865 		}
2866 #else
2867 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2868 #endif
2869 		break;
2870 #endif
2871 #ifdef INET6
2872 	case AF_INET6:
2873 		{
2874 			struct sockaddr_in6 *sin6;
2875 
2876 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2877 
2878 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2879 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2880 				if (sin6->sin6_scope_id == 0) {
2881 					/* recover scope_id for user */
2882 					(void)sa6_recoverscope(sin6);
2883 				} else {
2884 					/* clear embedded scope_id for user */
2885 					in6_clearscope(&sin6->sin6_addr);
2886 				}
2887 			}
2888 			break;
2889 		}
2890 #endif
2891 	default:
2892 		/* TSNH */
2893 		break;
2894 	}
2895 	spc->spc_state = state;
2896 	spc->spc_error = error;
2897 	spc->spc_assoc_id = sctp_get_associd(stcb);
2898 
2899 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2900 	SCTP_BUF_NEXT(m_notify) = NULL;
2901 
2902 	/* append to socket */
2903 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2904 	    0, 0, stcb->asoc.context, 0, 0, 0,
2905 	    m_notify);
2906 	if (control == NULL) {
2907 		/* no memory */
2908 		sctp_m_freem(m_notify);
2909 		return;
2910 	}
2911 	control->length = SCTP_BUF_LEN(m_notify);
2912 	control->spec_flags = M_NOTIFICATION;
2913 	/* not that we need this */
2914 	control->tail_mbuf = m_notify;
2915 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2916 	    control,
2917 	    &stcb->sctp_socket->so_rcv, 1,
2918 	    SCTP_READ_LOCK_NOT_HELD,
2919 	    so_locked);
2920 }
2921 
2922 
2923 static void
2924 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2925     struct sctp_tmit_chunk *chk, int so_locked
2926 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2927     SCTP_UNUSED
2928 #endif
2929 )
2930 {
2931 	struct mbuf *m_notify;
2932 	struct sctp_send_failed *ssf;
2933 	struct sctp_send_failed_event *ssfe;
2934 	struct sctp_queued_to_read *control;
2935 	struct sctp_chunkhdr *chkhdr;
2936 	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
2937 
2938 	if ((stcb == NULL) ||
2939 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2940 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2941 		/* event not enabled */
2942 		return;
2943 	}
2944 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2945 		notifhdr_len = sizeof(struct sctp_send_failed_event);
2946 	} else {
2947 		notifhdr_len = sizeof(struct sctp_send_failed);
2948 	}
2949 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
2950 	if (m_notify == NULL)
2951 		/* no space left */
2952 		return;
2953 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
2954 	if (stcb->asoc.idata_supported) {
2955 		chkhdr_len = sizeof(struct sctp_idata_chunk);
2956 	} else {
2957 		chkhdr_len = sizeof(struct sctp_data_chunk);
2958 	}
2959 	/* Use some defaults in case we can't access the chunk header */
2960 	if (chk->send_size >= chkhdr_len) {
2961 		payload_len = chk->send_size - chkhdr_len;
2962 	} else {
2963 		payload_len = 0;
2964 	}
2965 	padding_len = 0;
2966 	if (chk->data != NULL) {
2967 		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
2968 		if (chkhdr != NULL) {
2969 			chk_len = ntohs(chkhdr->chunk_length);
2970 			if ((chk_len >= chkhdr_len) &&
2971 			    (chk->send_size >= chk_len) &&
2972 			    (chk->send_size - chk_len < 4)) {
2973 				padding_len = chk->send_size - chk_len;
2974 				payload_len = chk->send_size - chkhdr_len - padding_len;
2975 			}
2976 		}
2977 	}
2978 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2979 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2980 		memset(ssfe, 0, notifhdr_len);
2981 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2982 		if (sent) {
2983 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2984 		} else {
2985 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2986 		}
2987 		ssfe->ssfe_length = (uint32_t) (notifhdr_len + payload_len);
2988 		ssfe->ssfe_error = error;
2989 		/* not exactly what the user sent in, but should be close :) */
2990 		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
2991 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2992 		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
2993 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2994 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2995 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2996 	} else {
2997 		ssf = mtod(m_notify, struct sctp_send_failed *);
2998 		memset(ssf, 0, notifhdr_len);
2999 		ssf->ssf_type = SCTP_SEND_FAILED;
3000 		if (sent) {
3001 			ssf->ssf_flags = SCTP_DATA_SENT;
3002 		} else {
3003 			ssf->ssf_flags = SCTP_DATA_UNSENT;
3004 		}
3005 		ssf->ssf_length = (uint32_t) (notifhdr_len + payload_len);
3006 		ssf->ssf_error = error;
3007 		/* not exactly what the user sent in, but should be close :) */
3008 		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
3009 		ssf->ssf_info.sinfo_ssn = (uint16_t) chk->rec.data.stream_seq;
3010 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3011 		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
3012 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
3013 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3014 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3015 	}
3016 	if (chk->data != NULL) {
3017 		/* Trim off the sctp chunk header (it should be there) */
3018 		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
3019 			m_adj(chk->data, chkhdr_len);
3020 			m_adj(chk->data, -padding_len);
3021 			sctp_mbuf_crush(chk->data);
3022 			chk->send_size -= (chkhdr_len + padding_len);
3023 		}
3024 	}
3025 	SCTP_BUF_NEXT(m_notify) = chk->data;
3026 	/* Steal off the mbuf */
3027 	chk->data = NULL;
3028 	/*
3029 	 * For this case, we check the actual socket buffer, since the assoc
3030 	 * is going away we don't want to overfill the socket buffer for a
3031 	 * non-reader
3032 	 */
3033 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3034 		sctp_m_freem(m_notify);
3035 		return;
3036 	}
3037 	/* append to socket */
3038 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3039 	    0, 0, stcb->asoc.context, 0, 0, 0,
3040 	    m_notify);
3041 	if (control == NULL) {
3042 		/* no memory */
3043 		sctp_m_freem(m_notify);
3044 		return;
3045 	}
3046 	control->spec_flags = M_NOTIFICATION;
3047 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3048 	    control,
3049 	    &stcb->sctp_socket->so_rcv, 1,
3050 	    SCTP_READ_LOCK_NOT_HELD,
3051 	    so_locked);
3052 }
3053 
3054 
3055 static void
3056 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3057     struct sctp_stream_queue_pending *sp, int so_locked
3058 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3059     SCTP_UNUSED
3060 #endif
3061 )
3062 {
3063 	struct mbuf *m_notify;
3064 	struct sctp_send_failed *ssf;
3065 	struct sctp_send_failed_event *ssfe;
3066 	struct sctp_queued_to_read *control;
3067 	int notifhdr_len;
3068 
3069 	if ((stcb == NULL) ||
3070 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3071 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3072 		/* event not enabled */
3073 		return;
3074 	}
3075 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3076 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3077 	} else {
3078 		notifhdr_len = sizeof(struct sctp_send_failed);
3079 	}
3080 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3081 	if (m_notify == NULL) {
3082 		/* no space left */
3083 		return;
3084 	}
3085 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3086 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3087 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3088 		memset(ssfe, 0, notifhdr_len);
3089 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3090 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3091 		ssfe->ssfe_length = (uint32_t) (notifhdr_len + sp->length);
3092 		ssfe->ssfe_error = error;
3093 		/* not exactly what the user sent in, but should be close :) */
3094 		ssfe->ssfe_info.snd_sid = sp->stream;
3095 		if (sp->some_taken) {
3096 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3097 		} else {
3098 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3099 		}
3100 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3101 		ssfe->ssfe_info.snd_context = sp->context;
3102 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3103 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3104 	} else {
3105 		ssf = mtod(m_notify, struct sctp_send_failed *);
3106 		memset(ssf, 0, notifhdr_len);
3107 		ssf->ssf_type = SCTP_SEND_FAILED;
3108 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3109 		ssf->ssf_length = (uint32_t) (notifhdr_len + sp->length);
3110 		ssf->ssf_error = error;
3111 		/* not exactly what the user sent in, but should be close :) */
3112 		ssf->ssf_info.sinfo_stream = sp->stream;
3113 		ssf->ssf_info.sinfo_ssn = 0;
3114 		if (sp->some_taken) {
3115 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3116 		} else {
3117 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3118 		}
3119 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3120 		ssf->ssf_info.sinfo_context = sp->context;
3121 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3122 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3123 	}
3124 	SCTP_BUF_NEXT(m_notify) = sp->data;
3125 
3126 	/* Steal off the mbuf */
3127 	sp->data = NULL;
3128 	/*
3129 	 * For this case, we check the actual socket buffer, since the assoc
3130 	 * is going away we don't want to overfill the socket buffer for a
3131 	 * non-reader
3132 	 */
3133 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3134 		sctp_m_freem(m_notify);
3135 		return;
3136 	}
3137 	/* append to socket */
3138 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3139 	    0, 0, stcb->asoc.context, 0, 0, 0,
3140 	    m_notify);
3141 	if (control == NULL) {
3142 		/* no memory */
3143 		sctp_m_freem(m_notify);
3144 		return;
3145 	}
3146 	control->spec_flags = M_NOTIFICATION;
3147 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3148 	    control,
3149 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3150 }
3151 
3152 
3153 
3154 static void
3155 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3156 {
3157 	struct mbuf *m_notify;
3158 	struct sctp_adaptation_event *sai;
3159 	struct sctp_queued_to_read *control;
3160 
3161 	if ((stcb == NULL) ||
3162 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3163 		/* event not enabled */
3164 		return;
3165 	}
3166 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3167 	if (m_notify == NULL)
3168 		/* no space left */
3169 		return;
3170 	SCTP_BUF_LEN(m_notify) = 0;
3171 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3172 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3173 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3174 	sai->sai_flags = 0;
3175 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3176 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3177 	sai->sai_assoc_id = sctp_get_associd(stcb);
3178 
3179 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3180 	SCTP_BUF_NEXT(m_notify) = NULL;
3181 
3182 	/* append to socket */
3183 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3184 	    0, 0, stcb->asoc.context, 0, 0, 0,
3185 	    m_notify);
3186 	if (control == NULL) {
3187 		/* no memory */
3188 		sctp_m_freem(m_notify);
3189 		return;
3190 	}
3191 	control->length = SCTP_BUF_LEN(m_notify);
3192 	control->spec_flags = M_NOTIFICATION;
3193 	/* not that we need this */
3194 	control->tail_mbuf = m_notify;
3195 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3196 	    control,
3197 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3198 }
3199 
3200 /* This always must be called with the read-queue LOCKED in the INP */
3201 static void
3202 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3203     uint32_t val, int so_locked
3204 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3205     SCTP_UNUSED
3206 #endif
3207 )
3208 {
3209 	struct mbuf *m_notify;
3210 	struct sctp_pdapi_event *pdapi;
3211 	struct sctp_queued_to_read *control;
3212 	struct sockbuf *sb;
3213 
3214 	if ((stcb == NULL) ||
3215 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3216 		/* event not enabled */
3217 		return;
3218 	}
3219 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3220 		return;
3221 	}
3222 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3223 	if (m_notify == NULL)
3224 		/* no space left */
3225 		return;
3226 	SCTP_BUF_LEN(m_notify) = 0;
3227 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3228 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3229 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3230 	pdapi->pdapi_flags = 0;
3231 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3232 	pdapi->pdapi_indication = error;
3233 	pdapi->pdapi_stream = (val >> 16);
3234 	pdapi->pdapi_seq = (val & 0x0000ffff);
3235 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3236 
3237 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3238 	SCTP_BUF_NEXT(m_notify) = NULL;
3239 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3240 	    0, 0, stcb->asoc.context, 0, 0, 0,
3241 	    m_notify);
3242 	if (control == NULL) {
3243 		/* no memory */
3244 		sctp_m_freem(m_notify);
3245 		return;
3246 	}
3247 	control->spec_flags = M_NOTIFICATION;
3248 	control->length = SCTP_BUF_LEN(m_notify);
3249 	/* not that we need this */
3250 	control->tail_mbuf = m_notify;
3251 	control->held_length = 0;
3252 	control->length = 0;
3253 	sb = &stcb->sctp_socket->so_rcv;
3254 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3255 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3256 	}
3257 	sctp_sballoc(stcb, sb, m_notify);
3258 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3259 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3260 	}
3261 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3262 	control->end_added = 1;
3263 	if (stcb->asoc.control_pdapi)
3264 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3265 	else {
3266 		/* we really should not see this case */
3267 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3268 	}
3269 	if (stcb->sctp_ep && stcb->sctp_socket) {
3270 		/* This should always be the case */
3271 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3272 		struct socket *so;
3273 
3274 		so = SCTP_INP_SO(stcb->sctp_ep);
3275 		if (!so_locked) {
3276 			atomic_add_int(&stcb->asoc.refcnt, 1);
3277 			SCTP_TCB_UNLOCK(stcb);
3278 			SCTP_SOCKET_LOCK(so, 1);
3279 			SCTP_TCB_LOCK(stcb);
3280 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3281 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3282 				SCTP_SOCKET_UNLOCK(so, 1);
3283 				return;
3284 			}
3285 		}
3286 #endif
3287 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3288 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3289 		if (!so_locked) {
3290 			SCTP_SOCKET_UNLOCK(so, 1);
3291 		}
3292 #endif
3293 	}
3294 }
3295 
3296 static void
3297 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3298 {
3299 	struct mbuf *m_notify;
3300 	struct sctp_shutdown_event *sse;
3301 	struct sctp_queued_to_read *control;
3302 
3303 	/*
3304 	 * For TCP model AND UDP connected sockets we will send an error up
3305 	 * when an SHUTDOWN completes
3306 	 */
3307 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3308 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3309 		/* mark socket closed for read/write and wakeup! */
3310 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3311 		struct socket *so;
3312 
3313 		so = SCTP_INP_SO(stcb->sctp_ep);
3314 		atomic_add_int(&stcb->asoc.refcnt, 1);
3315 		SCTP_TCB_UNLOCK(stcb);
3316 		SCTP_SOCKET_LOCK(so, 1);
3317 		SCTP_TCB_LOCK(stcb);
3318 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3319 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3320 			SCTP_SOCKET_UNLOCK(so, 1);
3321 			return;
3322 		}
3323 #endif
3324 		socantsendmore(stcb->sctp_socket);
3325 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3326 		SCTP_SOCKET_UNLOCK(so, 1);
3327 #endif
3328 	}
3329 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3330 		/* event not enabled */
3331 		return;
3332 	}
3333 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3334 	if (m_notify == NULL)
3335 		/* no space left */
3336 		return;
3337 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3338 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3339 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3340 	sse->sse_flags = 0;
3341 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3342 	sse->sse_assoc_id = sctp_get_associd(stcb);
3343 
3344 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3345 	SCTP_BUF_NEXT(m_notify) = NULL;
3346 
3347 	/* append to socket */
3348 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3349 	    0, 0, stcb->asoc.context, 0, 0, 0,
3350 	    m_notify);
3351 	if (control == NULL) {
3352 		/* no memory */
3353 		sctp_m_freem(m_notify);
3354 		return;
3355 	}
3356 	control->spec_flags = M_NOTIFICATION;
3357 	control->length = SCTP_BUF_LEN(m_notify);
3358 	/* not that we need this */
3359 	control->tail_mbuf = m_notify;
3360 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3361 	    control,
3362 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3363 }
3364 
3365 static void
3366 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3367     int so_locked
3368 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3369     SCTP_UNUSED
3370 #endif
3371 )
3372 {
3373 	struct mbuf *m_notify;
3374 	struct sctp_sender_dry_event *event;
3375 	struct sctp_queued_to_read *control;
3376 
3377 	if ((stcb == NULL) ||
3378 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3379 		/* event not enabled */
3380 		return;
3381 	}
3382 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3383 	if (m_notify == NULL) {
3384 		/* no space left */
3385 		return;
3386 	}
3387 	SCTP_BUF_LEN(m_notify) = 0;
3388 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3389 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3390 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3391 	event->sender_dry_flags = 0;
3392 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3393 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3394 
3395 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3396 	SCTP_BUF_NEXT(m_notify) = NULL;
3397 
3398 	/* append to socket */
3399 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3400 	    0, 0, stcb->asoc.context, 0, 0, 0,
3401 	    m_notify);
3402 	if (control == NULL) {
3403 		/* no memory */
3404 		sctp_m_freem(m_notify);
3405 		return;
3406 	}
3407 	control->length = SCTP_BUF_LEN(m_notify);
3408 	control->spec_flags = M_NOTIFICATION;
3409 	/* not that we need this */
3410 	control->tail_mbuf = m_notify;
3411 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3412 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3413 }
3414 
3415 
3416 void
3417 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3418 {
3419 	struct mbuf *m_notify;
3420 	struct sctp_queued_to_read *control;
3421 	struct sctp_stream_change_event *stradd;
3422 
3423 	if ((stcb == NULL) ||
3424 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3425 		/* event not enabled */
3426 		return;
3427 	}
3428 	if ((stcb->asoc.peer_req_out) && flag) {
3429 		/* Peer made the request, don't tell the local user */
3430 		stcb->asoc.peer_req_out = 0;
3431 		return;
3432 	}
3433 	stcb->asoc.peer_req_out = 0;
3434 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3435 	if (m_notify == NULL)
3436 		/* no space left */
3437 		return;
3438 	SCTP_BUF_LEN(m_notify) = 0;
3439 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3440 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3441 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3442 	stradd->strchange_flags = flag;
3443 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3444 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3445 	stradd->strchange_instrms = numberin;
3446 	stradd->strchange_outstrms = numberout;
3447 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3448 	SCTP_BUF_NEXT(m_notify) = NULL;
3449 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3450 		/* no space */
3451 		sctp_m_freem(m_notify);
3452 		return;
3453 	}
3454 	/* append to socket */
3455 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3456 	    0, 0, stcb->asoc.context, 0, 0, 0,
3457 	    m_notify);
3458 	if (control == NULL) {
3459 		/* no memory */
3460 		sctp_m_freem(m_notify);
3461 		return;
3462 	}
3463 	control->spec_flags = M_NOTIFICATION;
3464 	control->length = SCTP_BUF_LEN(m_notify);
3465 	/* not that we need this */
3466 	control->tail_mbuf = m_notify;
3467 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3468 	    control,
3469 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3470 }
3471 
3472 void
3473 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3474 {
3475 	struct mbuf *m_notify;
3476 	struct sctp_queued_to_read *control;
3477 	struct sctp_assoc_reset_event *strasoc;
3478 
3479 	if ((stcb == NULL) ||
3480 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3481 		/* event not enabled */
3482 		return;
3483 	}
3484 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3485 	if (m_notify == NULL)
3486 		/* no space left */
3487 		return;
3488 	SCTP_BUF_LEN(m_notify) = 0;
3489 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3490 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3491 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3492 	strasoc->assocreset_flags = flag;
3493 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3494 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3495 	strasoc->assocreset_local_tsn = sending_tsn;
3496 	strasoc->assocreset_remote_tsn = recv_tsn;
3497 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3498 	SCTP_BUF_NEXT(m_notify) = NULL;
3499 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3500 		/* no space */
3501 		sctp_m_freem(m_notify);
3502 		return;
3503 	}
3504 	/* append to socket */
3505 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3506 	    0, 0, stcb->asoc.context, 0, 0, 0,
3507 	    m_notify);
3508 	if (control == NULL) {
3509 		/* no memory */
3510 		sctp_m_freem(m_notify);
3511 		return;
3512 	}
3513 	control->spec_flags = M_NOTIFICATION;
3514 	control->length = SCTP_BUF_LEN(m_notify);
3515 	/* not that we need this */
3516 	control->tail_mbuf = m_notify;
3517 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3518 	    control,
3519 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3520 }
3521 
3522 
3523 
3524 static void
3525 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3526     int number_entries, uint16_t * list, int flag)
3527 {
3528 	struct mbuf *m_notify;
3529 	struct sctp_queued_to_read *control;
3530 	struct sctp_stream_reset_event *strreset;
3531 	int len;
3532 
3533 	if ((stcb == NULL) ||
3534 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3535 		/* event not enabled */
3536 		return;
3537 	}
3538 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3539 	if (m_notify == NULL)
3540 		/* no space left */
3541 		return;
3542 	SCTP_BUF_LEN(m_notify) = 0;
3543 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3544 	if (len > M_TRAILINGSPACE(m_notify)) {
3545 		/* never enough room */
3546 		sctp_m_freem(m_notify);
3547 		return;
3548 	}
3549 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3550 	memset(strreset, 0, len);
3551 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3552 	strreset->strreset_flags = flag;
3553 	strreset->strreset_length = len;
3554 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3555 	if (number_entries) {
3556 		int i;
3557 
3558 		for (i = 0; i < number_entries; i++) {
3559 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3560 		}
3561 	}
3562 	SCTP_BUF_LEN(m_notify) = len;
3563 	SCTP_BUF_NEXT(m_notify) = NULL;
3564 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3565 		/* no space */
3566 		sctp_m_freem(m_notify);
3567 		return;
3568 	}
3569 	/* append to socket */
3570 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3571 	    0, 0, stcb->asoc.context, 0, 0, 0,
3572 	    m_notify);
3573 	if (control == NULL) {
3574 		/* no memory */
3575 		sctp_m_freem(m_notify);
3576 		return;
3577 	}
3578 	control->spec_flags = M_NOTIFICATION;
3579 	control->length = SCTP_BUF_LEN(m_notify);
3580 	/* not that we need this */
3581 	control->tail_mbuf = m_notify;
3582 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3583 	    control,
3584 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3585 }
3586 
3587 
3588 static void
3589 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3590 {
3591 	struct mbuf *m_notify;
3592 	struct sctp_remote_error *sre;
3593 	struct sctp_queued_to_read *control;
3594 	unsigned int notif_len;
3595 	uint16_t chunk_len;
3596 
3597 	if ((stcb == NULL) ||
3598 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3599 		return;
3600 	}
3601 	if (chunk != NULL) {
3602 		chunk_len = ntohs(chunk->ch.chunk_length);
3603 	} else {
3604 		chunk_len = 0;
3605 	}
3606 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3607 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3608 	if (m_notify == NULL) {
3609 		/* Retry with smaller value. */
3610 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3611 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3612 		if (m_notify == NULL) {
3613 			return;
3614 		}
3615 	}
3616 	SCTP_BUF_NEXT(m_notify) = NULL;
3617 	sre = mtod(m_notify, struct sctp_remote_error *);
3618 	memset(sre, 0, notif_len);
3619 	sre->sre_type = SCTP_REMOTE_ERROR;
3620 	sre->sre_flags = 0;
3621 	sre->sre_length = sizeof(struct sctp_remote_error);
3622 	sre->sre_error = error;
3623 	sre->sre_assoc_id = sctp_get_associd(stcb);
3624 	if (notif_len > sizeof(struct sctp_remote_error)) {
3625 		memcpy(sre->sre_data, chunk, chunk_len);
3626 		sre->sre_length += chunk_len;
3627 	}
3628 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3629 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3630 	    0, 0, stcb->asoc.context, 0, 0, 0,
3631 	    m_notify);
3632 	if (control != NULL) {
3633 		control->length = SCTP_BUF_LEN(m_notify);
3634 		/* not that we need this */
3635 		control->tail_mbuf = m_notify;
3636 		control->spec_flags = M_NOTIFICATION;
3637 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3638 		    control,
3639 		    &stcb->sctp_socket->so_rcv, 1,
3640 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3641 	} else {
3642 		sctp_m_freem(m_notify);
3643 	}
3644 }
3645 
3646 
3647 void
3648 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3649     uint32_t error, void *data, int so_locked
3650 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3651     SCTP_UNUSED
3652 #endif
3653 )
3654 {
3655 	if ((stcb == NULL) ||
3656 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3657 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3658 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3659 		/* If the socket is gone we are out of here */
3660 		return;
3661 	}
3662 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3663 		return;
3664 	}
3665 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3666 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3667 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3668 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3669 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3670 			/* Don't report these in front states */
3671 			return;
3672 		}
3673 	}
3674 	switch (notification) {
3675 	case SCTP_NOTIFY_ASSOC_UP:
3676 		if (stcb->asoc.assoc_up_sent == 0) {
3677 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3678 			stcb->asoc.assoc_up_sent = 1;
3679 		}
3680 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3681 			sctp_notify_adaptation_layer(stcb);
3682 		}
3683 		if (stcb->asoc.auth_supported == 0) {
3684 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3685 			    NULL, so_locked);
3686 		}
3687 		break;
3688 	case SCTP_NOTIFY_ASSOC_DOWN:
3689 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3690 		break;
3691 	case SCTP_NOTIFY_INTERFACE_DOWN:
3692 		{
3693 			struct sctp_nets *net;
3694 
3695 			net = (struct sctp_nets *)data;
3696 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3697 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3698 			break;
3699 		}
3700 	case SCTP_NOTIFY_INTERFACE_UP:
3701 		{
3702 			struct sctp_nets *net;
3703 
3704 			net = (struct sctp_nets *)data;
3705 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3706 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3707 			break;
3708 		}
3709 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3710 		{
3711 			struct sctp_nets *net;
3712 
3713 			net = (struct sctp_nets *)data;
3714 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3715 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3716 			break;
3717 		}
3718 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3719 		sctp_notify_send_failed2(stcb, error,
3720 		    (struct sctp_stream_queue_pending *)data, so_locked);
3721 		break;
3722 	case SCTP_NOTIFY_SENT_DG_FAIL:
3723 		sctp_notify_send_failed(stcb, 1, error,
3724 		    (struct sctp_tmit_chunk *)data, so_locked);
3725 		break;
3726 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3727 		sctp_notify_send_failed(stcb, 0, error,
3728 		    (struct sctp_tmit_chunk *)data, so_locked);
3729 		break;
3730 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3731 		{
3732 			uint32_t val;
3733 
3734 			val = *((uint32_t *) data);
3735 
3736 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3737 			break;
3738 		}
3739 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3740 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3741 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3742 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3743 		} else {
3744 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3745 		}
3746 		break;
3747 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3748 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3749 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3750 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3751 		} else {
3752 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3753 		}
3754 		break;
3755 	case SCTP_NOTIFY_ASSOC_RESTART:
3756 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3757 		if (stcb->asoc.auth_supported == 0) {
3758 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3759 			    NULL, so_locked);
3760 		}
3761 		break;
3762 	case SCTP_NOTIFY_STR_RESET_SEND:
3763 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3764 		break;
3765 	case SCTP_NOTIFY_STR_RESET_RECV:
3766 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3767 		break;
3768 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3769 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3770 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3771 		break;
3772 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3773 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3774 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3775 		break;
3776 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3777 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3778 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3779 		break;
3780 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3781 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3782 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3783 		break;
3784 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3785 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3786 		    error, so_locked);
3787 		break;
3788 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3789 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3790 		    error, so_locked);
3791 		break;
3792 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3793 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3794 		    error, so_locked);
3795 		break;
3796 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3797 		sctp_notify_shutdown_event(stcb);
3798 		break;
3799 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3800 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3801 		    (uint16_t) (uintptr_t) data,
3802 		    so_locked);
3803 		break;
3804 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3805 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3806 		    (uint16_t) (uintptr_t) data,
3807 		    so_locked);
3808 		break;
3809 	case SCTP_NOTIFY_NO_PEER_AUTH:
3810 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3811 		    (uint16_t) (uintptr_t) data,
3812 		    so_locked);
3813 		break;
3814 	case SCTP_NOTIFY_SENDER_DRY:
3815 		sctp_notify_sender_dry_event(stcb, so_locked);
3816 		break;
3817 	case SCTP_NOTIFY_REMOTE_ERROR:
3818 		sctp_notify_remote_error(stcb, error, data);
3819 		break;
3820 	default:
3821 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3822 		    __func__, notification, notification);
3823 		break;
3824 	}			/* end switch */
3825 }
3826 
3827 void
3828 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3829 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3830     SCTP_UNUSED
3831 #endif
3832 )
3833 {
3834 	struct sctp_association *asoc;
3835 	struct sctp_stream_out *outs;
3836 	struct sctp_tmit_chunk *chk, *nchk;
3837 	struct sctp_stream_queue_pending *sp, *nsp;
3838 	int i;
3839 
3840 	if (stcb == NULL) {
3841 		return;
3842 	}
3843 	asoc = &stcb->asoc;
3844 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3845 		/* already being freed */
3846 		return;
3847 	}
3848 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3849 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3850 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3851 		return;
3852 	}
3853 	/* now through all the gunk freeing chunks */
3854 	if (holds_lock == 0) {
3855 		SCTP_TCB_SEND_LOCK(stcb);
3856 	}
3857 	/* sent queue SHOULD be empty */
3858 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3859 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3860 		asoc->sent_queue_cnt--;
3861 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3862 			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3863 				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3864 #ifdef INVARIANTS
3865 			} else {
3866 				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3867 #endif
3868 			}
3869 		}
3870 		if (chk->data != NULL) {
3871 			sctp_free_bufspace(stcb, asoc, chk, 1);
3872 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3873 			    error, chk, so_locked);
3874 			if (chk->data) {
3875 				sctp_m_freem(chk->data);
3876 				chk->data = NULL;
3877 			}
3878 		}
3879 		sctp_free_a_chunk(stcb, chk, so_locked);
3880 		/* sa_ignore FREED_MEMORY */
3881 	}
3882 	/* pending send queue SHOULD be empty */
3883 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3884 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3885 		asoc->send_queue_cnt--;
3886 		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3887 			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3888 #ifdef INVARIANTS
3889 		} else {
3890 			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3891 #endif
3892 		}
3893 		if (chk->data != NULL) {
3894 			sctp_free_bufspace(stcb, asoc, chk, 1);
3895 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3896 			    error, chk, so_locked);
3897 			if (chk->data) {
3898 				sctp_m_freem(chk->data);
3899 				chk->data = NULL;
3900 			}
3901 		}
3902 		sctp_free_a_chunk(stcb, chk, so_locked);
3903 		/* sa_ignore FREED_MEMORY */
3904 	}
3905 	for (i = 0; i < asoc->streamoutcnt; i++) {
3906 		/* For each stream */
3907 		outs = &asoc->strmout[i];
3908 		/* clean up any sends there */
3909 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3910 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
3911 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3912 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
3913 			sctp_free_spbufspace(stcb, asoc, sp);
3914 			if (sp->data) {
3915 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3916 				    error, (void *)sp, so_locked);
3917 				if (sp->data) {
3918 					sctp_m_freem(sp->data);
3919 					sp->data = NULL;
3920 					sp->tail_mbuf = NULL;
3921 					sp->length = 0;
3922 				}
3923 			}
3924 			if (sp->net) {
3925 				sctp_free_remote_addr(sp->net);
3926 				sp->net = NULL;
3927 			}
3928 			/* Free the chunk */
3929 			sctp_free_a_strmoq(stcb, sp, so_locked);
3930 			/* sa_ignore FREED_MEMORY */
3931 		}
3932 	}
3933 
3934 	if (holds_lock == 0) {
3935 		SCTP_TCB_SEND_UNLOCK(stcb);
3936 	}
3937 }
3938 
3939 void
3940 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3941     struct sctp_abort_chunk *abort, int so_locked
3942 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3943     SCTP_UNUSED
3944 #endif
3945 )
3946 {
3947 	if (stcb == NULL) {
3948 		return;
3949 	}
3950 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3951 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3952 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3953 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3954 	}
3955 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3956 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3957 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3958 		return;
3959 	}
3960 	/* Tell them we lost the asoc */
3961 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3962 	if (from_peer) {
3963 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3964 	} else {
3965 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3966 	}
3967 }
3968 
3969 void
3970 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3971     struct mbuf *m, int iphlen,
3972     struct sockaddr *src, struct sockaddr *dst,
3973     struct sctphdr *sh, struct mbuf *op_err,
3974     uint8_t mflowtype, uint32_t mflowid,
3975     uint32_t vrf_id, uint16_t port)
3976 {
3977 	uint32_t vtag;
3978 
3979 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3980 	struct socket *so;
3981 
3982 #endif
3983 
3984 	vtag = 0;
3985 	if (stcb != NULL) {
3986 		/* We have a TCB to abort, send notification too */
3987 		vtag = stcb->asoc.peer_vtag;
3988 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
3989 		/* get the assoc vrf id and table id */
3990 		vrf_id = stcb->asoc.vrf_id;
3991 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3992 	}
3993 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
3994 	    mflowtype, mflowid, inp->fibnum,
3995 	    vrf_id, port);
3996 	if (stcb != NULL) {
3997 		/* Ok, now lets free it */
3998 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3999 		so = SCTP_INP_SO(inp);
4000 		atomic_add_int(&stcb->asoc.refcnt, 1);
4001 		SCTP_TCB_UNLOCK(stcb);
4002 		SCTP_SOCKET_LOCK(so, 1);
4003 		SCTP_TCB_LOCK(stcb);
4004 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4005 #endif
4006 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4007 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4008 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4009 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4010 		}
4011 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4012 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
4013 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4014 		SCTP_SOCKET_UNLOCK(so, 1);
4015 #endif
4016 	}
4017 }
4018 
4019 #ifdef SCTP_ASOCLOG_OF_TSNS
4020 void
4021 sctp_print_out_track_log(struct sctp_tcb *stcb)
4022 {
4023 #ifdef NOSIY_PRINTS
4024 	int i;
4025 
4026 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4027 	SCTP_PRINTF("IN bound TSN log-aaa\n");
4028 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4029 		SCTP_PRINTF("None rcvd\n");
4030 		goto none_in;
4031 	}
4032 	if (stcb->asoc.tsn_in_wrapped) {
4033 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4034 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4035 			    stcb->asoc.in_tsnlog[i].tsn,
4036 			    stcb->asoc.in_tsnlog[i].strm,
4037 			    stcb->asoc.in_tsnlog[i].seq,
4038 			    stcb->asoc.in_tsnlog[i].flgs,
4039 			    stcb->asoc.in_tsnlog[i].sz);
4040 		}
4041 	}
4042 	if (stcb->asoc.tsn_in_at) {
4043 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4044 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4045 			    stcb->asoc.in_tsnlog[i].tsn,
4046 			    stcb->asoc.in_tsnlog[i].strm,
4047 			    stcb->asoc.in_tsnlog[i].seq,
4048 			    stcb->asoc.in_tsnlog[i].flgs,
4049 			    stcb->asoc.in_tsnlog[i].sz);
4050 		}
4051 	}
4052 none_in:
4053 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4054 	if ((stcb->asoc.tsn_out_at == 0) &&
4055 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4056 		SCTP_PRINTF("None sent\n");
4057 	}
4058 	if (stcb->asoc.tsn_out_wrapped) {
4059 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4060 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4061 			    stcb->asoc.out_tsnlog[i].tsn,
4062 			    stcb->asoc.out_tsnlog[i].strm,
4063 			    stcb->asoc.out_tsnlog[i].seq,
4064 			    stcb->asoc.out_tsnlog[i].flgs,
4065 			    stcb->asoc.out_tsnlog[i].sz);
4066 		}
4067 	}
4068 	if (stcb->asoc.tsn_out_at) {
4069 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4070 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4071 			    stcb->asoc.out_tsnlog[i].tsn,
4072 			    stcb->asoc.out_tsnlog[i].strm,
4073 			    stcb->asoc.out_tsnlog[i].seq,
4074 			    stcb->asoc.out_tsnlog[i].flgs,
4075 			    stcb->asoc.out_tsnlog[i].sz);
4076 		}
4077 	}
4078 #endif
4079 }
4080 
4081 #endif
4082 
4083 void
4084 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4085     struct mbuf *op_err,
4086     int so_locked
4087 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4088     SCTP_UNUSED
4089 #endif
4090 )
4091 {
4092 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4093 	struct socket *so;
4094 
4095 #endif
4096 
4097 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4098 	so = SCTP_INP_SO(inp);
4099 #endif
4100 	if (stcb == NULL) {
4101 		/* Got to have a TCB */
4102 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4103 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4104 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4105 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4106 			}
4107 		}
4108 		return;
4109 	} else {
4110 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4111 	}
4112 	/* notify the ulp */
4113 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4114 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4115 	}
4116 	/* notify the peer */
4117 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4118 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4119 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4120 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4121 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4122 	}
4123 	/* now free the asoc */
4124 #ifdef SCTP_ASOCLOG_OF_TSNS
4125 	sctp_print_out_track_log(stcb);
4126 #endif
4127 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4128 	if (!so_locked) {
4129 		atomic_add_int(&stcb->asoc.refcnt, 1);
4130 		SCTP_TCB_UNLOCK(stcb);
4131 		SCTP_SOCKET_LOCK(so, 1);
4132 		SCTP_TCB_LOCK(stcb);
4133 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4134 	}
4135 #endif
4136 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4137 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4138 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4139 	if (!so_locked) {
4140 		SCTP_SOCKET_UNLOCK(so, 1);
4141 	}
4142 #endif
4143 }
4144 
4145 void
4146 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4147     struct sockaddr *src, struct sockaddr *dst,
4148     struct sctphdr *sh, struct sctp_inpcb *inp,
4149     struct mbuf *cause,
4150     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4151     uint32_t vrf_id, uint16_t port)
4152 {
4153 	struct sctp_chunkhdr *ch, chunk_buf;
4154 	unsigned int chk_length;
4155 	int contains_init_chunk;
4156 
4157 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4158 	/* Generate a TO address for future reference */
4159 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4160 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4161 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4162 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4163 		}
4164 	}
4165 	contains_init_chunk = 0;
4166 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4167 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4168 	while (ch != NULL) {
4169 		chk_length = ntohs(ch->chunk_length);
4170 		if (chk_length < sizeof(*ch)) {
4171 			/* break to abort land */
4172 			break;
4173 		}
4174 		switch (ch->chunk_type) {
4175 		case SCTP_INIT:
4176 			contains_init_chunk = 1;
4177 			break;
4178 		case SCTP_PACKET_DROPPED:
4179 			/* we don't respond to pkt-dropped */
4180 			return;
4181 		case SCTP_ABORT_ASSOCIATION:
4182 			/* we don't respond with an ABORT to an ABORT */
4183 			return;
4184 		case SCTP_SHUTDOWN_COMPLETE:
4185 			/*
4186 			 * we ignore it since we are not waiting for it and
4187 			 * peer is gone
4188 			 */
4189 			return;
4190 		case SCTP_SHUTDOWN_ACK:
4191 			sctp_send_shutdown_complete2(src, dst, sh,
4192 			    mflowtype, mflowid, fibnum,
4193 			    vrf_id, port);
4194 			return;
4195 		default:
4196 			break;
4197 		}
4198 		offset += SCTP_SIZE32(chk_length);
4199 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4200 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4201 	}
4202 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4203 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4204 	    (contains_init_chunk == 0))) {
4205 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4206 		    mflowtype, mflowid, fibnum,
4207 		    vrf_id, port);
4208 	}
4209 }
4210 
4211 /*
4212  * check the inbound datagram to make sure there is not an abort inside it,
4213  * if there is return 1, else return 0.
4214  */
4215 int
4216 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4217 {
4218 	struct sctp_chunkhdr *ch;
4219 	struct sctp_init_chunk *init_chk, chunk_buf;
4220 	int offset;
4221 	unsigned int chk_length;
4222 
4223 	offset = iphlen + sizeof(struct sctphdr);
4224 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4225 	    (uint8_t *) & chunk_buf);
4226 	while (ch != NULL) {
4227 		chk_length = ntohs(ch->chunk_length);
4228 		if (chk_length < sizeof(*ch)) {
4229 			/* packet is probably corrupt */
4230 			break;
4231 		}
4232 		/* we seem to be ok, is it an abort? */
4233 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4234 			/* yep, tell them */
4235 			return (1);
4236 		}
4237 		if (ch->chunk_type == SCTP_INITIATION) {
4238 			/* need to update the Vtag */
4239 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4240 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4241 			if (init_chk != NULL) {
4242 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4243 			}
4244 		}
4245 		/* Nope, move to the next chunk */
4246 		offset += SCTP_SIZE32(chk_length);
4247 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4248 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4249 	}
4250 	return (0);
4251 }
4252 
4253 /*
4254  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4255  * set (i.e. it's 0) so, create this function to compare link local scopes
4256  */
4257 #ifdef INET6
4258 uint32_t
4259 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4260 {
4261 	struct sockaddr_in6 a, b;
4262 
4263 	/* save copies */
4264 	a = *addr1;
4265 	b = *addr2;
4266 
4267 	if (a.sin6_scope_id == 0)
4268 		if (sa6_recoverscope(&a)) {
4269 			/* can't get scope, so can't match */
4270 			return (0);
4271 		}
4272 	if (b.sin6_scope_id == 0)
4273 		if (sa6_recoverscope(&b)) {
4274 			/* can't get scope, so can't match */
4275 			return (0);
4276 		}
4277 	if (a.sin6_scope_id != b.sin6_scope_id)
4278 		return (0);
4279 
4280 	return (1);
4281 }
4282 
4283 /*
4284  * returns a sockaddr_in6 with embedded scope recovered and removed
4285  */
4286 struct sockaddr_in6 *
4287 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4288 {
4289 	/* check and strip embedded scope junk */
4290 	if (addr->sin6_family == AF_INET6) {
4291 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4292 			if (addr->sin6_scope_id == 0) {
4293 				*store = *addr;
4294 				if (!sa6_recoverscope(store)) {
4295 					/* use the recovered scope */
4296 					addr = store;
4297 				}
4298 			} else {
4299 				/* else, return the original "to" addr */
4300 				in6_clearscope(&addr->sin6_addr);
4301 			}
4302 		}
4303 	}
4304 	return (addr);
4305 }
4306 
4307 #endif
4308 
4309 /*
4310  * are the two addresses the same?  currently a "scopeless" check returns: 1
4311  * if same, 0 if not
4312  */
4313 int
4314 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4315 {
4316 
4317 	/* must be valid */
4318 	if (sa1 == NULL || sa2 == NULL)
4319 		return (0);
4320 
4321 	/* must be the same family */
4322 	if (sa1->sa_family != sa2->sa_family)
4323 		return (0);
4324 
4325 	switch (sa1->sa_family) {
4326 #ifdef INET6
4327 	case AF_INET6:
4328 		{
4329 			/* IPv6 addresses */
4330 			struct sockaddr_in6 *sin6_1, *sin6_2;
4331 
4332 			sin6_1 = (struct sockaddr_in6 *)sa1;
4333 			sin6_2 = (struct sockaddr_in6 *)sa2;
4334 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4335 			    sin6_2));
4336 		}
4337 #endif
4338 #ifdef INET
4339 	case AF_INET:
4340 		{
4341 			/* IPv4 addresses */
4342 			struct sockaddr_in *sin_1, *sin_2;
4343 
4344 			sin_1 = (struct sockaddr_in *)sa1;
4345 			sin_2 = (struct sockaddr_in *)sa2;
4346 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4347 		}
4348 #endif
4349 	default:
4350 		/* we don't do these... */
4351 		return (0);
4352 	}
4353 }
4354 
4355 void
4356 sctp_print_address(struct sockaddr *sa)
4357 {
4358 #ifdef INET6
4359 	char ip6buf[INET6_ADDRSTRLEN];
4360 
4361 #endif
4362 
4363 	switch (sa->sa_family) {
4364 #ifdef INET6
4365 	case AF_INET6:
4366 		{
4367 			struct sockaddr_in6 *sin6;
4368 
4369 			sin6 = (struct sockaddr_in6 *)sa;
4370 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4371 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4372 			    ntohs(sin6->sin6_port),
4373 			    sin6->sin6_scope_id);
4374 			break;
4375 		}
4376 #endif
4377 #ifdef INET
4378 	case AF_INET:
4379 		{
4380 			struct sockaddr_in *sin;
4381 			unsigned char *p;
4382 
4383 			sin = (struct sockaddr_in *)sa;
4384 			p = (unsigned char *)&sin->sin_addr;
4385 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4386 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4387 			break;
4388 		}
4389 #endif
4390 	default:
4391 		SCTP_PRINTF("?\n");
4392 		break;
4393 	}
4394 }
4395 
4396 void
4397 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4398     struct sctp_inpcb *new_inp,
4399     struct sctp_tcb *stcb,
4400     int waitflags)
4401 {
4402 	/*
4403 	 * go through our old INP and pull off any control structures that
4404 	 * belong to stcb and move then to the new inp.
4405 	 */
4406 	struct socket *old_so, *new_so;
4407 	struct sctp_queued_to_read *control, *nctl;
4408 	struct sctp_readhead tmp_queue;
4409 	struct mbuf *m;
4410 	int error = 0;
4411 
4412 	old_so = old_inp->sctp_socket;
4413 	new_so = new_inp->sctp_socket;
4414 	TAILQ_INIT(&tmp_queue);
4415 	error = sblock(&old_so->so_rcv, waitflags);
4416 	if (error) {
4417 		/*
4418 		 * Gak, can't get sblock, we have a problem. data will be
4419 		 * left stranded.. and we don't dare look at it since the
4420 		 * other thread may be reading something. Oh well, its a
4421 		 * screwed up app that does a peeloff OR a accept while
4422 		 * reading from the main socket... actually its only the
4423 		 * peeloff() case, since I think read will fail on a
4424 		 * listening socket..
4425 		 */
4426 		return;
4427 	}
4428 	/* lock the socket buffers */
4429 	SCTP_INP_READ_LOCK(old_inp);
4430 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4431 		/* Pull off all for out target stcb */
4432 		if (control->stcb == stcb) {
4433 			/* remove it we want it */
4434 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4435 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4436 			m = control->data;
4437 			while (m) {
4438 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4439 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4440 				}
4441 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4442 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4443 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4444 				}
4445 				m = SCTP_BUF_NEXT(m);
4446 			}
4447 		}
4448 	}
4449 	SCTP_INP_READ_UNLOCK(old_inp);
4450 	/* Remove the sb-lock on the old socket */
4451 
4452 	sbunlock(&old_so->so_rcv);
4453 	/* Now we move them over to the new socket buffer */
4454 	SCTP_INP_READ_LOCK(new_inp);
4455 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4456 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4457 		m = control->data;
4458 		while (m) {
4459 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4460 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4461 			}
4462 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4463 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4464 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4465 			}
4466 			m = SCTP_BUF_NEXT(m);
4467 		}
4468 	}
4469 	SCTP_INP_READ_UNLOCK(new_inp);
4470 }
4471 
4472 void
4473 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4474     struct sctp_tcb *stcb,
4475     int so_locked
4476 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4477     SCTP_UNUSED
4478 #endif
4479 )
4480 {
4481 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4482 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4483 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4484 		} else {
4485 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4486 			struct socket *so;
4487 
4488 			so = SCTP_INP_SO(inp);
4489 			if (!so_locked) {
4490 				if (stcb) {
4491 					atomic_add_int(&stcb->asoc.refcnt, 1);
4492 					SCTP_TCB_UNLOCK(stcb);
4493 				}
4494 				SCTP_SOCKET_LOCK(so, 1);
4495 				if (stcb) {
4496 					SCTP_TCB_LOCK(stcb);
4497 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4498 				}
4499 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4500 					SCTP_SOCKET_UNLOCK(so, 1);
4501 					return;
4502 				}
4503 			}
4504 #endif
4505 			sctp_sorwakeup(inp, inp->sctp_socket);
4506 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4507 			if (!so_locked) {
4508 				SCTP_SOCKET_UNLOCK(so, 1);
4509 			}
4510 #endif
4511 		}
4512 	}
4513 }
4514 
4515 void
4516 sctp_add_to_readq(struct sctp_inpcb *inp,
4517     struct sctp_tcb *stcb,
4518     struct sctp_queued_to_read *control,
4519     struct sockbuf *sb,
4520     int end,
4521     int inp_read_lock_held,
4522     int so_locked
4523 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4524     SCTP_UNUSED
4525 #endif
4526 )
4527 {
4528 	/*
4529 	 * Here we must place the control on the end of the socket read
4530 	 * queue AND increment sb_cc so that select will work properly on
4531 	 * read.
4532 	 */
4533 	struct mbuf *m, *prev = NULL;
4534 
4535 	if (inp == NULL) {
4536 		/* Gak, TSNH!! */
4537 #ifdef INVARIANTS
4538 		panic("Gak, inp NULL on add_to_readq");
4539 #endif
4540 		return;
4541 	}
4542 	if (inp_read_lock_held == 0)
4543 		SCTP_INP_READ_LOCK(inp);
4544 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4545 		sctp_free_remote_addr(control->whoFrom);
4546 		if (control->data) {
4547 			sctp_m_freem(control->data);
4548 			control->data = NULL;
4549 		}
4550 		sctp_free_a_readq(stcb, control);
4551 		if (inp_read_lock_held == 0)
4552 			SCTP_INP_READ_UNLOCK(inp);
4553 		return;
4554 	}
4555 	if (!(control->spec_flags & M_NOTIFICATION)) {
4556 		atomic_add_int(&inp->total_recvs, 1);
4557 		if (!control->do_not_ref_stcb) {
4558 			atomic_add_int(&stcb->total_recvs, 1);
4559 		}
4560 	}
4561 	m = control->data;
4562 	control->held_length = 0;
4563 	control->length = 0;
4564 	while (m) {
4565 		if (SCTP_BUF_LEN(m) == 0) {
4566 			/* Skip mbufs with NO length */
4567 			if (prev == NULL) {
4568 				/* First one */
4569 				control->data = sctp_m_free(m);
4570 				m = control->data;
4571 			} else {
4572 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4573 				m = SCTP_BUF_NEXT(prev);
4574 			}
4575 			if (m == NULL) {
4576 				control->tail_mbuf = prev;
4577 			}
4578 			continue;
4579 		}
4580 		prev = m;
4581 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4582 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4583 		}
4584 		sctp_sballoc(stcb, sb, m);
4585 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4586 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4587 		}
4588 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4589 		m = SCTP_BUF_NEXT(m);
4590 	}
4591 	if (prev != NULL) {
4592 		control->tail_mbuf = prev;
4593 	} else {
4594 		/* Everything got collapsed out?? */
4595 		sctp_free_remote_addr(control->whoFrom);
4596 		sctp_free_a_readq(stcb, control);
4597 		if (inp_read_lock_held == 0)
4598 			SCTP_INP_READ_UNLOCK(inp);
4599 		return;
4600 	}
4601 	if (end) {
4602 		control->end_added = 1;
4603 	}
4604 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4605 	control->on_read_q = 1;
4606 	if (inp_read_lock_held == 0)
4607 		SCTP_INP_READ_UNLOCK(inp);
4608 	if (inp && inp->sctp_socket) {
4609 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
4610 	}
4611 }
4612 
4613 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4614  *************ALTERNATE ROUTING CODE
4615  */
4616 
4617 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4618  *************ALTERNATE ROUTING CODE
4619  */
4620 
4621 struct mbuf *
4622 sctp_generate_cause(uint16_t code, char *info)
4623 {
4624 	struct mbuf *m;
4625 	struct sctp_gen_error_cause *cause;
4626 	size_t info_len;
4627 	uint16_t len;
4628 
4629 	if ((code == 0) || (info == NULL)) {
4630 		return (NULL);
4631 	}
4632 	info_len = strlen(info);
4633 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4634 		return (NULL);
4635 	}
4636 	len = (uint16_t) (sizeof(struct sctp_paramhdr) + info_len);
4637 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4638 	if (m != NULL) {
4639 		SCTP_BUF_LEN(m) = len;
4640 		cause = mtod(m, struct sctp_gen_error_cause *);
4641 		cause->code = htons(code);
4642 		cause->length = htons(len);
4643 		memcpy(cause->info, info, info_len);
4644 	}
4645 	return (m);
4646 }
4647 
4648 struct mbuf *
4649 sctp_generate_no_user_data_cause(uint32_t tsn)
4650 {
4651 	struct mbuf *m;
4652 	struct sctp_error_no_user_data *no_user_data_cause;
4653 	uint16_t len;
4654 
4655 	len = (uint16_t) sizeof(struct sctp_error_no_user_data);
4656 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4657 	if (m != NULL) {
4658 		SCTP_BUF_LEN(m) = len;
4659 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4660 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4661 		no_user_data_cause->cause.length = htons(len);
4662 		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4663 	}
4664 	return (m);
4665 }
4666 
4667 #ifdef SCTP_MBCNT_LOGGING
4668 void
4669 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4670     struct sctp_tmit_chunk *tp1, int chk_cnt)
4671 {
4672 	if (tp1->data == NULL) {
4673 		return;
4674 	}
4675 	asoc->chunks_on_out_queue -= chk_cnt;
4676 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4677 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4678 		    asoc->total_output_queue_size,
4679 		    tp1->book_size,
4680 		    0,
4681 		    tp1->mbcnt);
4682 	}
4683 	if (asoc->total_output_queue_size >= tp1->book_size) {
4684 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4685 	} else {
4686 		asoc->total_output_queue_size = 0;
4687 	}
4688 
4689 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4690 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4691 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4692 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4693 		} else {
4694 			stcb->sctp_socket->so_snd.sb_cc = 0;
4695 
4696 		}
4697 	}
4698 }
4699 
4700 #endif
4701 
4702 int
4703 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4704     uint8_t sent, int so_locked
4705 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4706     SCTP_UNUSED
4707 #endif
4708 )
4709 {
4710 	struct sctp_stream_out *strq;
4711 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4712 	struct sctp_stream_queue_pending *sp;
4713 	uint16_t stream = 0, seq = 0;
4714 	uint8_t foundeom = 0;
4715 	int ret_sz = 0;
4716 	int notdone;
4717 	int do_wakeup_routine = 0;
4718 
4719 	stream = tp1->rec.data.stream_number;
4720 	seq = tp1->rec.data.stream_seq;
4721 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4722 		stcb->asoc.abandoned_sent[0]++;
4723 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4724 		stcb->asoc.strmout[stream].abandoned_sent[0]++;
4725 #if defined(SCTP_DETAILED_STR_STATS)
4726 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4727 #endif
4728 	} else {
4729 		stcb->asoc.abandoned_unsent[0]++;
4730 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4731 		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
4732 #if defined(SCTP_DETAILED_STR_STATS)
4733 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4734 #endif
4735 	}
4736 	do {
4737 		ret_sz += tp1->book_size;
4738 		if (tp1->data != NULL) {
4739 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4740 				sctp_flight_size_decrease(tp1);
4741 				sctp_total_flight_decrease(stcb, tp1);
4742 			}
4743 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4744 			stcb->asoc.peers_rwnd += tp1->send_size;
4745 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4746 			if (sent) {
4747 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4748 			} else {
4749 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4750 			}
4751 			if (tp1->data) {
4752 				sctp_m_freem(tp1->data);
4753 				tp1->data = NULL;
4754 			}
4755 			do_wakeup_routine = 1;
4756 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4757 				stcb->asoc.sent_queue_cnt_removeable--;
4758 			}
4759 		}
4760 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4761 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4762 		    SCTP_DATA_NOT_FRAG) {
4763 			/* not frag'ed we ae done   */
4764 			notdone = 0;
4765 			foundeom = 1;
4766 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4767 			/* end of frag, we are done */
4768 			notdone = 0;
4769 			foundeom = 1;
4770 		} else {
4771 			/*
4772 			 * Its a begin or middle piece, we must mark all of
4773 			 * it
4774 			 */
4775 			notdone = 1;
4776 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4777 		}
4778 	} while (tp1 && notdone);
4779 	if (foundeom == 0) {
4780 		/*
4781 		 * The multi-part message was scattered across the send and
4782 		 * sent queue.
4783 		 */
4784 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4785 			if ((tp1->rec.data.stream_number != stream) ||
4786 			    (tp1->rec.data.stream_seq != seq)) {
4787 				break;
4788 			}
4789 			/*
4790 			 * save to chk in case we have some on stream out
4791 			 * queue. If so and we have an un-transmitted one we
4792 			 * don't have to fudge the TSN.
4793 			 */
4794 			chk = tp1;
4795 			ret_sz += tp1->book_size;
4796 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4797 			if (sent) {
4798 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4799 			} else {
4800 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4801 			}
4802 			if (tp1->data) {
4803 				sctp_m_freem(tp1->data);
4804 				tp1->data = NULL;
4805 			}
4806 			/* No flight involved here book the size to 0 */
4807 			tp1->book_size = 0;
4808 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4809 				foundeom = 1;
4810 			}
4811 			do_wakeup_routine = 1;
4812 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4813 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4814 			/*
4815 			 * on to the sent queue so we can wait for it to be
4816 			 * passed by.
4817 			 */
4818 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4819 			    sctp_next);
4820 			stcb->asoc.send_queue_cnt--;
4821 			stcb->asoc.sent_queue_cnt++;
4822 		}
4823 	}
4824 	if (foundeom == 0) {
4825 		/*
4826 		 * Still no eom found. That means there is stuff left on the
4827 		 * stream out queue.. yuck.
4828 		 */
4829 		SCTP_TCB_SEND_LOCK(stcb);
4830 		strq = &stcb->asoc.strmout[stream];
4831 		sp = TAILQ_FIRST(&strq->outqueue);
4832 		if (sp != NULL) {
4833 			sp->discard_rest = 1;
4834 			/*
4835 			 * We may need to put a chunk on the queue that
4836 			 * holds the TSN that would have been sent with the
4837 			 * LAST bit.
4838 			 */
4839 			if (chk == NULL) {
4840 				/* Yep, we have to */
4841 				sctp_alloc_a_chunk(stcb, chk);
4842 				if (chk == NULL) {
4843 					/*
4844 					 * we are hosed. All we can do is
4845 					 * nothing.. which will cause an
4846 					 * abort if the peer is paying
4847 					 * attention.
4848 					 */
4849 					goto oh_well;
4850 				}
4851 				memset(chk, 0, sizeof(*chk));
4852 				chk->rec.data.rcv_flags = 0;
4853 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4854 				chk->asoc = &stcb->asoc;
4855 				if (stcb->asoc.idata_supported == 0) {
4856 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4857 						chk->rec.data.stream_seq = 0;
4858 					} else {
4859 						chk->rec.data.stream_seq = strq->next_mid_ordered;
4860 					}
4861 				} else {
4862 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4863 						chk->rec.data.stream_seq = strq->next_mid_unordered;
4864 					} else {
4865 						chk->rec.data.stream_seq = strq->next_mid_ordered;
4866 					}
4867 				}
4868 				chk->rec.data.stream_number = sp->stream;
4869 				chk->rec.data.payloadtype = sp->ppid;
4870 				chk->rec.data.context = sp->context;
4871 				chk->flags = sp->act_flags;
4872 				chk->whoTo = NULL;
4873 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4874 				strq->chunks_on_queues++;
4875 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4876 				stcb->asoc.sent_queue_cnt++;
4877 				stcb->asoc.pr_sctp_cnt++;
4878 			}
4879 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4880 			if (sp->sinfo_flags & SCTP_UNORDERED) {
4881 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4882 			}
4883 			if (stcb->asoc.idata_supported == 0) {
4884 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4885 					strq->next_mid_ordered++;
4886 				}
4887 			} else {
4888 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4889 					strq->next_mid_unordered++;
4890 				} else {
4891 					strq->next_mid_ordered++;
4892 				}
4893 			}
4894 	oh_well:
4895 			if (sp->data) {
4896 				/*
4897 				 * Pull any data to free up the SB and allow
4898 				 * sender to "add more" while we will throw
4899 				 * away :-)
4900 				 */
4901 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4902 				ret_sz += sp->length;
4903 				do_wakeup_routine = 1;
4904 				sp->some_taken = 1;
4905 				sctp_m_freem(sp->data);
4906 				sp->data = NULL;
4907 				sp->tail_mbuf = NULL;
4908 				sp->length = 0;
4909 			}
4910 		}
4911 		SCTP_TCB_SEND_UNLOCK(stcb);
4912 	}
4913 	if (do_wakeup_routine) {
4914 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4915 		struct socket *so;
4916 
4917 		so = SCTP_INP_SO(stcb->sctp_ep);
4918 		if (!so_locked) {
4919 			atomic_add_int(&stcb->asoc.refcnt, 1);
4920 			SCTP_TCB_UNLOCK(stcb);
4921 			SCTP_SOCKET_LOCK(so, 1);
4922 			SCTP_TCB_LOCK(stcb);
4923 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4924 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4925 				/* assoc was freed while we were unlocked */
4926 				SCTP_SOCKET_UNLOCK(so, 1);
4927 				return (ret_sz);
4928 			}
4929 		}
4930 #endif
4931 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4932 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4933 		if (!so_locked) {
4934 			SCTP_SOCKET_UNLOCK(so, 1);
4935 		}
4936 #endif
4937 	}
4938 	return (ret_sz);
4939 }
4940 
4941 /*
4942  * checks to see if the given address, sa, is one that is currently known by
4943  * the kernel note: can't distinguish the same address on multiple interfaces
4944  * and doesn't handle multiple addresses with different zone/scope id's note:
4945  * ifa_ifwithaddr() compares the entire sockaddr struct
4946  */
4947 struct sctp_ifa *
4948 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4949     int holds_lock)
4950 {
4951 	struct sctp_laddr *laddr;
4952 
4953 	if (holds_lock == 0) {
4954 		SCTP_INP_RLOCK(inp);
4955 	}
4956 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4957 		if (laddr->ifa == NULL)
4958 			continue;
4959 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4960 			continue;
4961 #ifdef INET
4962 		if (addr->sa_family == AF_INET) {
4963 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4964 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4965 				/* found him. */
4966 				if (holds_lock == 0) {
4967 					SCTP_INP_RUNLOCK(inp);
4968 				}
4969 				return (laddr->ifa);
4970 				break;
4971 			}
4972 		}
4973 #endif
4974 #ifdef INET6
4975 		if (addr->sa_family == AF_INET6) {
4976 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4977 			    &laddr->ifa->address.sin6)) {
4978 				/* found him. */
4979 				if (holds_lock == 0) {
4980 					SCTP_INP_RUNLOCK(inp);
4981 				}
4982 				return (laddr->ifa);
4983 				break;
4984 			}
4985 		}
4986 #endif
4987 	}
4988 	if (holds_lock == 0) {
4989 		SCTP_INP_RUNLOCK(inp);
4990 	}
4991 	return (NULL);
4992 }
4993 
4994 uint32_t
4995 sctp_get_ifa_hash_val(struct sockaddr *addr)
4996 {
4997 	switch (addr->sa_family) {
4998 #ifdef INET
4999 	case AF_INET:
5000 		{
5001 			struct sockaddr_in *sin;
5002 
5003 			sin = (struct sockaddr_in *)addr;
5004 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5005 		}
5006 #endif
5007 #ifdef INET6
5008 	case AF_INET6:
5009 		{
5010 			struct sockaddr_in6 *sin6;
5011 			uint32_t hash_of_addr;
5012 
5013 			sin6 = (struct sockaddr_in6 *)addr;
5014 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5015 			    sin6->sin6_addr.s6_addr32[1] +
5016 			    sin6->sin6_addr.s6_addr32[2] +
5017 			    sin6->sin6_addr.s6_addr32[3]);
5018 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5019 			return (hash_of_addr);
5020 		}
5021 #endif
5022 	default:
5023 		break;
5024 	}
5025 	return (0);
5026 }
5027 
5028 struct sctp_ifa *
5029 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5030 {
5031 	struct sctp_ifa *sctp_ifap;
5032 	struct sctp_vrf *vrf;
5033 	struct sctp_ifalist *hash_head;
5034 	uint32_t hash_of_addr;
5035 
5036 	if (holds_lock == 0)
5037 		SCTP_IPI_ADDR_RLOCK();
5038 
5039 	vrf = sctp_find_vrf(vrf_id);
5040 	if (vrf == NULL) {
5041 		if (holds_lock == 0)
5042 			SCTP_IPI_ADDR_RUNLOCK();
5043 		return (NULL);
5044 	}
5045 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5046 
5047 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5048 	if (hash_head == NULL) {
5049 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5050 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5051 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5052 		sctp_print_address(addr);
5053 		SCTP_PRINTF("No such bucket for address\n");
5054 		if (holds_lock == 0)
5055 			SCTP_IPI_ADDR_RUNLOCK();
5056 
5057 		return (NULL);
5058 	}
5059 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5060 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5061 			continue;
5062 #ifdef INET
5063 		if (addr->sa_family == AF_INET) {
5064 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5065 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5066 				/* found him. */
5067 				if (holds_lock == 0)
5068 					SCTP_IPI_ADDR_RUNLOCK();
5069 				return (sctp_ifap);
5070 				break;
5071 			}
5072 		}
5073 #endif
5074 #ifdef INET6
5075 		if (addr->sa_family == AF_INET6) {
5076 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5077 			    &sctp_ifap->address.sin6)) {
5078 				/* found him. */
5079 				if (holds_lock == 0)
5080 					SCTP_IPI_ADDR_RUNLOCK();
5081 				return (sctp_ifap);
5082 				break;
5083 			}
5084 		}
5085 #endif
5086 	}
5087 	if (holds_lock == 0)
5088 		SCTP_IPI_ADDR_RUNLOCK();
5089 	return (NULL);
5090 }
5091 
5092 static void
5093 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
5094     uint32_t rwnd_req)
5095 {
5096 	/* User pulled some data, do we need a rwnd update? */
5097 	int r_unlocked = 0;
5098 	uint32_t dif, rwnd;
5099 	struct socket *so = NULL;
5100 
5101 	if (stcb == NULL)
5102 		return;
5103 
5104 	atomic_add_int(&stcb->asoc.refcnt, 1);
5105 
5106 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5107 	    SCTP_STATE_SHUTDOWN_RECEIVED |
5108 	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5109 		/* Pre-check If we are freeing no update */
5110 		goto no_lock;
5111 	}
5112 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5113 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5114 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5115 		goto out;
5116 	}
5117 	so = stcb->sctp_socket;
5118 	if (so == NULL) {
5119 		goto out;
5120 	}
5121 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5122 	/* Have you have freed enough to look */
5123 	*freed_so_far = 0;
5124 	/* Yep, its worth a look and the lock overhead */
5125 
5126 	/* Figure out what the rwnd would be */
5127 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5128 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5129 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5130 	} else {
5131 		dif = 0;
5132 	}
5133 	if (dif >= rwnd_req) {
5134 		if (hold_rlock) {
5135 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5136 			r_unlocked = 1;
5137 		}
5138 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5139 			/*
5140 			 * One last check before we allow the guy possibly
5141 			 * to get in. There is a race, where the guy has not
5142 			 * reached the gate. In that case
5143 			 */
5144 			goto out;
5145 		}
5146 		SCTP_TCB_LOCK(stcb);
5147 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5148 			/* No reports here */
5149 			SCTP_TCB_UNLOCK(stcb);
5150 			goto out;
5151 		}
5152 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5153 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5154 
5155 		sctp_chunk_output(stcb->sctp_ep, stcb,
5156 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5157 		/* make sure no timer is running */
5158 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5159 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5160 		SCTP_TCB_UNLOCK(stcb);
5161 	} else {
5162 		/* Update how much we have pending */
5163 		stcb->freed_by_sorcv_sincelast = dif;
5164 	}
5165 out:
5166 	if (so && r_unlocked && hold_rlock) {
5167 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5168 	}
5169 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5170 no_lock:
5171 	atomic_add_int(&stcb->asoc.refcnt, -1);
5172 	return;
5173 }
5174 
5175 int
5176 sctp_sorecvmsg(struct socket *so,
5177     struct uio *uio,
5178     struct mbuf **mp,
5179     struct sockaddr *from,
5180     int fromlen,
5181     int *msg_flags,
5182     struct sctp_sndrcvinfo *sinfo,
5183     int filling_sinfo)
5184 {
5185 	/*
5186 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5187 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5188 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5189 	 * On the way out we may send out any combination of:
5190 	 * MSG_NOTIFICATION MSG_EOR
5191 	 *
5192 	 */
5193 	struct sctp_inpcb *inp = NULL;
5194 	int my_len = 0;
5195 	int cp_len = 0, error = 0;
5196 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5197 	struct mbuf *m = NULL;
5198 	struct sctp_tcb *stcb = NULL;
5199 	int wakeup_read_socket = 0;
5200 	int freecnt_applied = 0;
5201 	int out_flags = 0, in_flags = 0;
5202 	int block_allowed = 1;
5203 	uint32_t freed_so_far = 0;
5204 	uint32_t copied_so_far = 0;
5205 	int in_eeor_mode = 0;
5206 	int no_rcv_needed = 0;
5207 	uint32_t rwnd_req = 0;
5208 	int hold_sblock = 0;
5209 	int hold_rlock = 0;
5210 	ssize_t slen = 0;
5211 	uint32_t held_length = 0;
5212 	int sockbuf_lock = 0;
5213 
5214 	if (uio == NULL) {
5215 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5216 		return (EINVAL);
5217 	}
5218 	if (msg_flags) {
5219 		in_flags = *msg_flags;
5220 		if (in_flags & MSG_PEEK)
5221 			SCTP_STAT_INCR(sctps_read_peeks);
5222 	} else {
5223 		in_flags = 0;
5224 	}
5225 	slen = uio->uio_resid;
5226 
5227 	/* Pull in and set up our int flags */
5228 	if (in_flags & MSG_OOB) {
5229 		/* Out of band's NOT supported */
5230 		return (EOPNOTSUPP);
5231 	}
5232 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5233 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5234 		return (EINVAL);
5235 	}
5236 	if ((in_flags & (MSG_DONTWAIT
5237 	    | MSG_NBIO
5238 	    )) ||
5239 	    SCTP_SO_IS_NBIO(so)) {
5240 		block_allowed = 0;
5241 	}
5242 	/* setup the endpoint */
5243 	inp = (struct sctp_inpcb *)so->so_pcb;
5244 	if (inp == NULL) {
5245 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5246 		return (EFAULT);
5247 	}
5248 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5249 	/* Must be at least a MTU's worth */
5250 	if (rwnd_req < SCTP_MIN_RWND)
5251 		rwnd_req = SCTP_MIN_RWND;
5252 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5253 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5254 		sctp_misc_ints(SCTP_SORECV_ENTER,
5255 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t) uio->uio_resid);
5256 	}
5257 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5258 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5259 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t) uio->uio_resid);
5260 	}
5261 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5262 	if (error) {
5263 		goto release_unlocked;
5264 	}
5265 	sockbuf_lock = 1;
5266 restart:
5267 
5268 
5269 restart_nosblocks:
5270 	if (hold_sblock == 0) {
5271 		SOCKBUF_LOCK(&so->so_rcv);
5272 		hold_sblock = 1;
5273 	}
5274 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5275 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5276 		goto out;
5277 	}
5278 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5279 		if (so->so_error) {
5280 			error = so->so_error;
5281 			if ((in_flags & MSG_PEEK) == 0)
5282 				so->so_error = 0;
5283 			goto out;
5284 		} else {
5285 			if (so->so_rcv.sb_cc == 0) {
5286 				/* indicate EOF */
5287 				error = 0;
5288 				goto out;
5289 			}
5290 		}
5291 	}
5292 	if (so->so_rcv.sb_cc <= held_length) {
5293 		if (so->so_error) {
5294 			error = so->so_error;
5295 			if ((in_flags & MSG_PEEK) == 0) {
5296 				so->so_error = 0;
5297 			}
5298 			goto out;
5299 		}
5300 		if ((so->so_rcv.sb_cc == 0) &&
5301 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5302 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5303 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5304 				/*
5305 				 * For active open side clear flags for
5306 				 * re-use passive open is blocked by
5307 				 * connect.
5308 				 */
5309 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5310 					/*
5311 					 * You were aborted, passive side
5312 					 * always hits here
5313 					 */
5314 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5315 					error = ECONNRESET;
5316 				}
5317 				so->so_state &= ~(SS_ISCONNECTING |
5318 				    SS_ISDISCONNECTING |
5319 				    SS_ISCONFIRMING |
5320 				    SS_ISCONNECTED);
5321 				if (error == 0) {
5322 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5323 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5324 						error = ENOTCONN;
5325 					}
5326 				}
5327 				goto out;
5328 			}
5329 		}
5330 		if (block_allowed) {
5331 			error = sbwait(&so->so_rcv);
5332 			if (error) {
5333 				goto out;
5334 			}
5335 			held_length = 0;
5336 			goto restart_nosblocks;
5337 		} else {
5338 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5339 			error = EWOULDBLOCK;
5340 			goto out;
5341 		}
5342 	}
5343 	if (hold_sblock == 1) {
5344 		SOCKBUF_UNLOCK(&so->so_rcv);
5345 		hold_sblock = 0;
5346 	}
5347 	/* we possibly have data we can read */
5348 	/* sa_ignore FREED_MEMORY */
5349 	control = TAILQ_FIRST(&inp->read_queue);
5350 	if (control == NULL) {
5351 		/*
5352 		 * This could be happening since the appender did the
5353 		 * increment but as not yet did the tailq insert onto the
5354 		 * read_queue
5355 		 */
5356 		if (hold_rlock == 0) {
5357 			SCTP_INP_READ_LOCK(inp);
5358 		}
5359 		control = TAILQ_FIRST(&inp->read_queue);
5360 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5361 #ifdef INVARIANTS
5362 			panic("Huh, its non zero and nothing on control?");
5363 #endif
5364 			so->so_rcv.sb_cc = 0;
5365 		}
5366 		SCTP_INP_READ_UNLOCK(inp);
5367 		hold_rlock = 0;
5368 		goto restart;
5369 	}
5370 	if ((control->length == 0) &&
5371 	    (control->do_not_ref_stcb)) {
5372 		/*
5373 		 * Clean up code for freeing assoc that left behind a
5374 		 * pdapi.. maybe a peer in EEOR that just closed after
5375 		 * sending and never indicated a EOR.
5376 		 */
5377 		if (hold_rlock == 0) {
5378 			hold_rlock = 1;
5379 			SCTP_INP_READ_LOCK(inp);
5380 		}
5381 		control->held_length = 0;
5382 		if (control->data) {
5383 			/* Hmm there is data here .. fix */
5384 			struct mbuf *m_tmp;
5385 			int cnt = 0;
5386 
5387 			m_tmp = control->data;
5388 			while (m_tmp) {
5389 				cnt += SCTP_BUF_LEN(m_tmp);
5390 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5391 					control->tail_mbuf = m_tmp;
5392 					control->end_added = 1;
5393 				}
5394 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5395 			}
5396 			control->length = cnt;
5397 		} else {
5398 			/* remove it */
5399 			TAILQ_REMOVE(&inp->read_queue, control, next);
5400 			/* Add back any hiddend data */
5401 			sctp_free_remote_addr(control->whoFrom);
5402 			sctp_free_a_readq(stcb, control);
5403 		}
5404 		if (hold_rlock) {
5405 			hold_rlock = 0;
5406 			SCTP_INP_READ_UNLOCK(inp);
5407 		}
5408 		goto restart;
5409 	}
5410 	if ((control->length == 0) &&
5411 	    (control->end_added == 1)) {
5412 		/*
5413 		 * Do we also need to check for (control->pdapi_aborted ==
5414 		 * 1)?
5415 		 */
5416 		if (hold_rlock == 0) {
5417 			hold_rlock = 1;
5418 			SCTP_INP_READ_LOCK(inp);
5419 		}
5420 		TAILQ_REMOVE(&inp->read_queue, control, next);
5421 		if (control->data) {
5422 #ifdef INVARIANTS
5423 			panic("control->data not null but control->length == 0");
5424 #else
5425 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5426 			sctp_m_freem(control->data);
5427 			control->data = NULL;
5428 #endif
5429 		}
5430 		if (control->aux_data) {
5431 			sctp_m_free(control->aux_data);
5432 			control->aux_data = NULL;
5433 		}
5434 #ifdef INVARIANTS
5435 		if (control->on_strm_q) {
5436 			panic("About to free ctl:%p so:%p and its in %d",
5437 			    control, so, control->on_strm_q);
5438 		}
5439 #endif
5440 		sctp_free_remote_addr(control->whoFrom);
5441 		sctp_free_a_readq(stcb, control);
5442 		if (hold_rlock) {
5443 			hold_rlock = 0;
5444 			SCTP_INP_READ_UNLOCK(inp);
5445 		}
5446 		goto restart;
5447 	}
5448 	if (control->length == 0) {
5449 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5450 		    (filling_sinfo)) {
5451 			/* find a more suitable one then this */
5452 			ctl = TAILQ_NEXT(control, next);
5453 			while (ctl) {
5454 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5455 				    (ctl->some_taken ||
5456 				    (ctl->spec_flags & M_NOTIFICATION) ||
5457 				    ((ctl->do_not_ref_stcb == 0) &&
5458 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5459 				    ) {
5460 					/*-
5461 					 * If we have a different TCB next, and there is data
5462 					 * present. If we have already taken some (pdapi), OR we can
5463 					 * ref the tcb and no delivery as started on this stream, we
5464 					 * take it. Note we allow a notification on a different
5465 					 * assoc to be delivered..
5466 					 */
5467 					control = ctl;
5468 					goto found_one;
5469 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5470 					    (ctl->length) &&
5471 					    ((ctl->some_taken) ||
5472 					    ((ctl->do_not_ref_stcb == 0) &&
5473 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5474 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5475 					/*-
5476 					 * If we have the same tcb, and there is data present, and we
5477 					 * have the strm interleave feature present. Then if we have
5478 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5479 					 * not started a delivery for this stream, we can take it.
5480 					 * Note we do NOT allow a notificaiton on the same assoc to
5481 					 * be delivered.
5482 					 */
5483 					control = ctl;
5484 					goto found_one;
5485 				}
5486 				ctl = TAILQ_NEXT(ctl, next);
5487 			}
5488 		}
5489 		/*
5490 		 * if we reach here, not suitable replacement is available
5491 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5492 		 * into the our held count, and its time to sleep again.
5493 		 */
5494 		held_length = so->so_rcv.sb_cc;
5495 		control->held_length = so->so_rcv.sb_cc;
5496 		goto restart;
5497 	}
5498 	/* Clear the held length since there is something to read */
5499 	control->held_length = 0;
5500 found_one:
5501 	/*
5502 	 * If we reach here, control has a some data for us to read off.
5503 	 * Note that stcb COULD be NULL.
5504 	 */
5505 	if (hold_rlock == 0) {
5506 		hold_rlock = 1;
5507 		SCTP_INP_READ_LOCK(inp);
5508 	}
5509 	control->some_taken++;
5510 	stcb = control->stcb;
5511 	if (stcb) {
5512 		if ((control->do_not_ref_stcb == 0) &&
5513 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5514 			if (freecnt_applied == 0)
5515 				stcb = NULL;
5516 		} else if (control->do_not_ref_stcb == 0) {
5517 			/* you can't free it on me please */
5518 			/*
5519 			 * The lock on the socket buffer protects us so the
5520 			 * free code will stop. But since we used the
5521 			 * socketbuf lock and the sender uses the tcb_lock
5522 			 * to increment, we need to use the atomic add to
5523 			 * the refcnt
5524 			 */
5525 			if (freecnt_applied) {
5526 #ifdef INVARIANTS
5527 				panic("refcnt already incremented");
5528 #else
5529 				SCTP_PRINTF("refcnt already incremented?\n");
5530 #endif
5531 			} else {
5532 				atomic_add_int(&stcb->asoc.refcnt, 1);
5533 				freecnt_applied = 1;
5534 			}
5535 			/*
5536 			 * Setup to remember how much we have not yet told
5537 			 * the peer our rwnd has opened up. Note we grab the
5538 			 * value from the tcb from last time. Note too that
5539 			 * sack sending clears this when a sack is sent,
5540 			 * which is fine. Once we hit the rwnd_req, we then
5541 			 * will go to the sctp_user_rcvd() that will not
5542 			 * lock until it KNOWs it MUST send a WUP-SACK.
5543 			 */
5544 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5545 			stcb->freed_by_sorcv_sincelast = 0;
5546 		}
5547 	}
5548 	if (stcb &&
5549 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5550 	    control->do_not_ref_stcb == 0) {
5551 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5552 	}
5553 	/* First lets get off the sinfo and sockaddr info */
5554 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5555 		sinfo->sinfo_stream = control->sinfo_stream;
5556 		sinfo->sinfo_ssn = (uint16_t) control->sinfo_ssn;
5557 		sinfo->sinfo_flags = control->sinfo_flags;
5558 		sinfo->sinfo_ppid = control->sinfo_ppid;
5559 		sinfo->sinfo_context = control->sinfo_context;
5560 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5561 		sinfo->sinfo_tsn = control->sinfo_tsn;
5562 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5563 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5564 		nxt = TAILQ_NEXT(control, next);
5565 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5566 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5567 			struct sctp_extrcvinfo *s_extra;
5568 
5569 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5570 			if ((nxt) &&
5571 			    (nxt->length)) {
5572 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5573 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5574 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5575 				}
5576 				if (nxt->spec_flags & M_NOTIFICATION) {
5577 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5578 				}
5579 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5580 				s_extra->serinfo_next_length = nxt->length;
5581 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5582 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5583 				if (nxt->tail_mbuf != NULL) {
5584 					if (nxt->end_added) {
5585 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5586 					}
5587 				}
5588 			} else {
5589 				/*
5590 				 * we explicitly 0 this, since the memcpy
5591 				 * got some other things beyond the older
5592 				 * sinfo_ that is on the control's structure
5593 				 * :-D
5594 				 */
5595 				nxt = NULL;
5596 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5597 				s_extra->serinfo_next_aid = 0;
5598 				s_extra->serinfo_next_length = 0;
5599 				s_extra->serinfo_next_ppid = 0;
5600 				s_extra->serinfo_next_stream = 0;
5601 			}
5602 		}
5603 		/*
5604 		 * update off the real current cum-ack, if we have an stcb.
5605 		 */
5606 		if ((control->do_not_ref_stcb == 0) && stcb)
5607 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5608 		/*
5609 		 * mask off the high bits, we keep the actual chunk bits in
5610 		 * there.
5611 		 */
5612 		sinfo->sinfo_flags &= 0x00ff;
5613 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5614 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5615 		}
5616 	}
5617 #ifdef SCTP_ASOCLOG_OF_TSNS
5618 	{
5619 		int index, newindex;
5620 		struct sctp_pcbtsn_rlog *entry;
5621 
5622 		do {
5623 			index = inp->readlog_index;
5624 			newindex = index + 1;
5625 			if (newindex >= SCTP_READ_LOG_SIZE) {
5626 				newindex = 0;
5627 			}
5628 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5629 		entry = &inp->readlog[index];
5630 		entry->vtag = control->sinfo_assoc_id;
5631 		entry->strm = control->sinfo_stream;
5632 		entry->seq = control->sinfo_ssn;
5633 		entry->sz = control->length;
5634 		entry->flgs = control->sinfo_flags;
5635 	}
5636 #endif
5637 	if ((fromlen > 0) && (from != NULL)) {
5638 		union sctp_sockstore store;
5639 		size_t len;
5640 
5641 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5642 #ifdef INET6
5643 		case AF_INET6:
5644 			len = sizeof(struct sockaddr_in6);
5645 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5646 			store.sin6.sin6_port = control->port_from;
5647 			break;
5648 #endif
5649 #ifdef INET
5650 		case AF_INET:
5651 #ifdef INET6
5652 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5653 				len = sizeof(struct sockaddr_in6);
5654 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5655 				    &store.sin6);
5656 				store.sin6.sin6_port = control->port_from;
5657 			} else {
5658 				len = sizeof(struct sockaddr_in);
5659 				store.sin = control->whoFrom->ro._l_addr.sin;
5660 				store.sin.sin_port = control->port_from;
5661 			}
5662 #else
5663 			len = sizeof(struct sockaddr_in);
5664 			store.sin = control->whoFrom->ro._l_addr.sin;
5665 			store.sin.sin_port = control->port_from;
5666 #endif
5667 			break;
5668 #endif
5669 		default:
5670 			len = 0;
5671 			break;
5672 		}
5673 		memcpy(from, &store, min((size_t)fromlen, len));
5674 #ifdef INET6
5675 		{
5676 			struct sockaddr_in6 lsa6, *from6;
5677 
5678 			from6 = (struct sockaddr_in6 *)from;
5679 			sctp_recover_scope_mac(from6, (&lsa6));
5680 		}
5681 #endif
5682 	}
5683 	if (hold_rlock) {
5684 		SCTP_INP_READ_UNLOCK(inp);
5685 		hold_rlock = 0;
5686 	}
5687 	if (hold_sblock) {
5688 		SOCKBUF_UNLOCK(&so->so_rcv);
5689 		hold_sblock = 0;
5690 	}
5691 	/* now copy out what data we can */
5692 	if (mp == NULL) {
5693 		/* copy out each mbuf in the chain up to length */
5694 get_more_data:
5695 		m = control->data;
5696 		while (m) {
5697 			/* Move out all we can */
5698 			cp_len = (int)uio->uio_resid;
5699 			my_len = (int)SCTP_BUF_LEN(m);
5700 			if (cp_len > my_len) {
5701 				/* not enough in this buf */
5702 				cp_len = my_len;
5703 			}
5704 			if (hold_rlock) {
5705 				SCTP_INP_READ_UNLOCK(inp);
5706 				hold_rlock = 0;
5707 			}
5708 			if (cp_len > 0)
5709 				error = uiomove(mtod(m, char *), cp_len, uio);
5710 			/* re-read */
5711 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5712 				goto release;
5713 			}
5714 			if ((control->do_not_ref_stcb == 0) && stcb &&
5715 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5716 				no_rcv_needed = 1;
5717 			}
5718 			if (error) {
5719 				/* error we are out of here */
5720 				goto release;
5721 			}
5722 			SCTP_INP_READ_LOCK(inp);
5723 			hold_rlock = 1;
5724 			if (cp_len == SCTP_BUF_LEN(m)) {
5725 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5726 				    (control->end_added)) {
5727 					out_flags |= MSG_EOR;
5728 					if ((control->do_not_ref_stcb == 0) &&
5729 					    (control->stcb != NULL) &&
5730 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5731 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5732 				}
5733 				if (control->spec_flags & M_NOTIFICATION) {
5734 					out_flags |= MSG_NOTIFICATION;
5735 				}
5736 				/* we ate up the mbuf */
5737 				if (in_flags & MSG_PEEK) {
5738 					/* just looking */
5739 					m = SCTP_BUF_NEXT(m);
5740 					copied_so_far += cp_len;
5741 				} else {
5742 					/* dispose of the mbuf */
5743 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5744 						sctp_sblog(&so->so_rcv,
5745 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5746 					}
5747 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5748 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5749 						sctp_sblog(&so->so_rcv,
5750 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5751 					}
5752 					copied_so_far += cp_len;
5753 					freed_so_far += cp_len;
5754 					freed_so_far += MSIZE;
5755 					atomic_subtract_int(&control->length, cp_len);
5756 					control->data = sctp_m_free(m);
5757 					m = control->data;
5758 					/*
5759 					 * been through it all, must hold sb
5760 					 * lock ok to null tail
5761 					 */
5762 					if (control->data == NULL) {
5763 #ifdef INVARIANTS
5764 						if ((control->end_added == 0) ||
5765 						    (TAILQ_NEXT(control, next) == NULL)) {
5766 							/*
5767 							 * If the end is not
5768 							 * added, OR the
5769 							 * next is NOT null
5770 							 * we MUST have the
5771 							 * lock.
5772 							 */
5773 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5774 								panic("Hmm we don't own the lock?");
5775 							}
5776 						}
5777 #endif
5778 						control->tail_mbuf = NULL;
5779 #ifdef INVARIANTS
5780 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5781 							panic("end_added, nothing left and no MSG_EOR");
5782 						}
5783 #endif
5784 					}
5785 				}
5786 			} else {
5787 				/* Do we need to trim the mbuf? */
5788 				if (control->spec_flags & M_NOTIFICATION) {
5789 					out_flags |= MSG_NOTIFICATION;
5790 				}
5791 				if ((in_flags & MSG_PEEK) == 0) {
5792 					SCTP_BUF_RESV_UF(m, cp_len);
5793 					SCTP_BUF_LEN(m) -= cp_len;
5794 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5795 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5796 					}
5797 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5798 					if ((control->do_not_ref_stcb == 0) &&
5799 					    stcb) {
5800 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5801 					}
5802 					copied_so_far += cp_len;
5803 					freed_so_far += cp_len;
5804 					freed_so_far += MSIZE;
5805 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5806 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5807 						    SCTP_LOG_SBRESULT, 0);
5808 					}
5809 					atomic_subtract_int(&control->length, cp_len);
5810 				} else {
5811 					copied_so_far += cp_len;
5812 				}
5813 			}
5814 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5815 				break;
5816 			}
5817 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5818 			    (control->do_not_ref_stcb == 0) &&
5819 			    (freed_so_far >= rwnd_req)) {
5820 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5821 			}
5822 		}		/* end while(m) */
5823 		/*
5824 		 * At this point we have looked at it all and we either have
5825 		 * a MSG_EOR/or read all the user wants... <OR>
5826 		 * control->length == 0.
5827 		 */
5828 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5829 			/* we are done with this control */
5830 			if (control->length == 0) {
5831 				if (control->data) {
5832 #ifdef INVARIANTS
5833 					panic("control->data not null at read eor?");
5834 #else
5835 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5836 					sctp_m_freem(control->data);
5837 					control->data = NULL;
5838 #endif
5839 				}
5840 		done_with_control:
5841 				if (hold_rlock == 0) {
5842 					SCTP_INP_READ_LOCK(inp);
5843 					hold_rlock = 1;
5844 				}
5845 				TAILQ_REMOVE(&inp->read_queue, control, next);
5846 				/* Add back any hiddend data */
5847 				if (control->held_length) {
5848 					held_length = 0;
5849 					control->held_length = 0;
5850 					wakeup_read_socket = 1;
5851 				}
5852 				if (control->aux_data) {
5853 					sctp_m_free(control->aux_data);
5854 					control->aux_data = NULL;
5855 				}
5856 				no_rcv_needed = control->do_not_ref_stcb;
5857 				sctp_free_remote_addr(control->whoFrom);
5858 				control->data = NULL;
5859 #ifdef INVARIANTS
5860 				if (control->on_strm_q) {
5861 					panic("About to free ctl:%p so:%p and its in %d",
5862 					    control, so, control->on_strm_q);
5863 				}
5864 #endif
5865 				sctp_free_a_readq(stcb, control);
5866 				control = NULL;
5867 				if ((freed_so_far >= rwnd_req) &&
5868 				    (no_rcv_needed == 0))
5869 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5870 
5871 			} else {
5872 				/*
5873 				 * The user did not read all of this
5874 				 * message, turn off the returned MSG_EOR
5875 				 * since we are leaving more behind on the
5876 				 * control to read.
5877 				 */
5878 #ifdef INVARIANTS
5879 				if (control->end_added &&
5880 				    (control->data == NULL) &&
5881 				    (control->tail_mbuf == NULL)) {
5882 					panic("Gak, control->length is corrupt?");
5883 				}
5884 #endif
5885 				no_rcv_needed = control->do_not_ref_stcb;
5886 				out_flags &= ~MSG_EOR;
5887 			}
5888 		}
5889 		if (out_flags & MSG_EOR) {
5890 			goto release;
5891 		}
5892 		if ((uio->uio_resid == 0) ||
5893 		    ((in_eeor_mode) &&
5894 		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5895 			goto release;
5896 		}
5897 		/*
5898 		 * If I hit here the receiver wants more and this message is
5899 		 * NOT done (pd-api). So two questions. Can we block? if not
5900 		 * we are done. Did the user NOT set MSG_WAITALL?
5901 		 */
5902 		if (block_allowed == 0) {
5903 			goto release;
5904 		}
5905 		/*
5906 		 * We need to wait for more data a few things: - We don't
5907 		 * sbunlock() so we don't get someone else reading. - We
5908 		 * must be sure to account for the case where what is added
5909 		 * is NOT to our control when we wakeup.
5910 		 */
5911 
5912 		/*
5913 		 * Do we need to tell the transport a rwnd update might be
5914 		 * needed before we go to sleep?
5915 		 */
5916 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5917 		    ((freed_so_far >= rwnd_req) &&
5918 		    (control->do_not_ref_stcb == 0) &&
5919 		    (no_rcv_needed == 0))) {
5920 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5921 		}
5922 wait_some_more:
5923 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5924 			goto release;
5925 		}
5926 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5927 			goto release;
5928 
5929 		if (hold_rlock == 1) {
5930 			SCTP_INP_READ_UNLOCK(inp);
5931 			hold_rlock = 0;
5932 		}
5933 		if (hold_sblock == 0) {
5934 			SOCKBUF_LOCK(&so->so_rcv);
5935 			hold_sblock = 1;
5936 		}
5937 		if ((copied_so_far) && (control->length == 0) &&
5938 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5939 			goto release;
5940 		}
5941 		if (so->so_rcv.sb_cc <= control->held_length) {
5942 			error = sbwait(&so->so_rcv);
5943 			if (error) {
5944 				goto release;
5945 			}
5946 			control->held_length = 0;
5947 		}
5948 		if (hold_sblock) {
5949 			SOCKBUF_UNLOCK(&so->so_rcv);
5950 			hold_sblock = 0;
5951 		}
5952 		if (control->length == 0) {
5953 			/* still nothing here */
5954 			if (control->end_added == 1) {
5955 				/* he aborted, or is done i.e.did a shutdown */
5956 				out_flags |= MSG_EOR;
5957 				if (control->pdapi_aborted) {
5958 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5959 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5960 
5961 					out_flags |= MSG_TRUNC;
5962 				} else {
5963 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5964 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5965 				}
5966 				goto done_with_control;
5967 			}
5968 			if (so->so_rcv.sb_cc > held_length) {
5969 				control->held_length = so->so_rcv.sb_cc;
5970 				held_length = 0;
5971 			}
5972 			goto wait_some_more;
5973 		} else if (control->data == NULL) {
5974 			/*
5975 			 * we must re-sync since data is probably being
5976 			 * added
5977 			 */
5978 			SCTP_INP_READ_LOCK(inp);
5979 			if ((control->length > 0) && (control->data == NULL)) {
5980 				/*
5981 				 * big trouble.. we have the lock and its
5982 				 * corrupt?
5983 				 */
5984 #ifdef INVARIANTS
5985 				panic("Impossible data==NULL length !=0");
5986 #endif
5987 				out_flags |= MSG_EOR;
5988 				out_flags |= MSG_TRUNC;
5989 				control->length = 0;
5990 				SCTP_INP_READ_UNLOCK(inp);
5991 				goto done_with_control;
5992 			}
5993 			SCTP_INP_READ_UNLOCK(inp);
5994 			/* We will fall around to get more data */
5995 		}
5996 		goto get_more_data;
5997 	} else {
5998 		/*-
5999 		 * Give caller back the mbuf chain,
6000 		 * store in uio_resid the length
6001 		 */
6002 		wakeup_read_socket = 0;
6003 		if ((control->end_added == 0) ||
6004 		    (TAILQ_NEXT(control, next) == NULL)) {
6005 			/* Need to get rlock */
6006 			if (hold_rlock == 0) {
6007 				SCTP_INP_READ_LOCK(inp);
6008 				hold_rlock = 1;
6009 			}
6010 		}
6011 		if (control->end_added) {
6012 			out_flags |= MSG_EOR;
6013 			if ((control->do_not_ref_stcb == 0) &&
6014 			    (control->stcb != NULL) &&
6015 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6016 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6017 		}
6018 		if (control->spec_flags & M_NOTIFICATION) {
6019 			out_flags |= MSG_NOTIFICATION;
6020 		}
6021 		uio->uio_resid = control->length;
6022 		*mp = control->data;
6023 		m = control->data;
6024 		while (m) {
6025 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6026 				sctp_sblog(&so->so_rcv,
6027 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6028 			}
6029 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6030 			freed_so_far += SCTP_BUF_LEN(m);
6031 			freed_so_far += MSIZE;
6032 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6033 				sctp_sblog(&so->so_rcv,
6034 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6035 			}
6036 			m = SCTP_BUF_NEXT(m);
6037 		}
6038 		control->data = control->tail_mbuf = NULL;
6039 		control->length = 0;
6040 		if (out_flags & MSG_EOR) {
6041 			/* Done with this control */
6042 			goto done_with_control;
6043 		}
6044 	}
6045 release:
6046 	if (hold_rlock == 1) {
6047 		SCTP_INP_READ_UNLOCK(inp);
6048 		hold_rlock = 0;
6049 	}
6050 	if (hold_sblock == 1) {
6051 		SOCKBUF_UNLOCK(&so->so_rcv);
6052 		hold_sblock = 0;
6053 	}
6054 	sbunlock(&so->so_rcv);
6055 	sockbuf_lock = 0;
6056 
6057 release_unlocked:
6058 	if (hold_sblock) {
6059 		SOCKBUF_UNLOCK(&so->so_rcv);
6060 		hold_sblock = 0;
6061 	}
6062 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6063 		if ((freed_so_far >= rwnd_req) &&
6064 		    (control && (control->do_not_ref_stcb == 0)) &&
6065 		    (no_rcv_needed == 0))
6066 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6067 	}
6068 out:
6069 	if (msg_flags) {
6070 		*msg_flags = out_flags;
6071 	}
6072 	if (((out_flags & MSG_EOR) == 0) &&
6073 	    ((in_flags & MSG_PEEK) == 0) &&
6074 	    (sinfo) &&
6075 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6076 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6077 		struct sctp_extrcvinfo *s_extra;
6078 
6079 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6080 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6081 	}
6082 	if (hold_rlock == 1) {
6083 		SCTP_INP_READ_UNLOCK(inp);
6084 	}
6085 	if (hold_sblock) {
6086 		SOCKBUF_UNLOCK(&so->so_rcv);
6087 	}
6088 	if (sockbuf_lock) {
6089 		sbunlock(&so->so_rcv);
6090 	}
6091 	if (freecnt_applied) {
6092 		/*
6093 		 * The lock on the socket buffer protects us so the free
6094 		 * code will stop. But since we used the socketbuf lock and
6095 		 * the sender uses the tcb_lock to increment, we need to use
6096 		 * the atomic add to the refcnt.
6097 		 */
6098 		if (stcb == NULL) {
6099 #ifdef INVARIANTS
6100 			panic("stcb for refcnt has gone NULL?");
6101 			goto stage_left;
6102 #else
6103 			goto stage_left;
6104 #endif
6105 		}
6106 		/* Save the value back for next time */
6107 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6108 		atomic_add_int(&stcb->asoc.refcnt, -1);
6109 	}
6110 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6111 		if (stcb) {
6112 			sctp_misc_ints(SCTP_SORECV_DONE,
6113 			    freed_so_far,
6114 			    (uint32_t) ((uio) ? (slen - uio->uio_resid) : slen),
6115 			    stcb->asoc.my_rwnd,
6116 			    so->so_rcv.sb_cc);
6117 		} else {
6118 			sctp_misc_ints(SCTP_SORECV_DONE,
6119 			    freed_so_far,
6120 			    (uint32_t) ((uio) ? (slen - uio->uio_resid) : slen),
6121 			    0,
6122 			    so->so_rcv.sb_cc);
6123 		}
6124 	}
6125 stage_left:
6126 	if (wakeup_read_socket) {
6127 		sctp_sorwakeup(inp, so);
6128 	}
6129 	return (error);
6130 }
6131 
6132 
6133 #ifdef SCTP_MBUF_LOGGING
6134 struct mbuf *
6135 sctp_m_free(struct mbuf *m)
6136 {
6137 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6138 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6139 	}
6140 	return (m_free(m));
6141 }
6142 
6143 void
6144 sctp_m_freem(struct mbuf *mb)
6145 {
6146 	while (mb != NULL)
6147 		mb = sctp_m_free(mb);
6148 }
6149 
6150 #endif
6151 
6152 int
6153 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6154 {
6155 	/*
6156 	 * Given a local address. For all associations that holds the
6157 	 * address, request a peer-set-primary.
6158 	 */
6159 	struct sctp_ifa *ifa;
6160 	struct sctp_laddr *wi;
6161 
6162 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6163 	if (ifa == NULL) {
6164 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6165 		return (EADDRNOTAVAIL);
6166 	}
6167 	/*
6168 	 * Now that we have the ifa we must awaken the iterator with this
6169 	 * message.
6170 	 */
6171 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6172 	if (wi == NULL) {
6173 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6174 		return (ENOMEM);
6175 	}
6176 	/* Now incr the count and int wi structure */
6177 	SCTP_INCR_LADDR_COUNT();
6178 	bzero(wi, sizeof(*wi));
6179 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6180 	wi->ifa = ifa;
6181 	wi->action = SCTP_SET_PRIM_ADDR;
6182 	atomic_add_int(&ifa->refcount, 1);
6183 
6184 	/* Now add it to the work queue */
6185 	SCTP_WQ_ADDR_LOCK();
6186 	/*
6187 	 * Should this really be a tailq? As it is we will process the
6188 	 * newest first :-0
6189 	 */
6190 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6191 	SCTP_WQ_ADDR_UNLOCK();
6192 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6193 	    (struct sctp_inpcb *)NULL,
6194 	    (struct sctp_tcb *)NULL,
6195 	    (struct sctp_nets *)NULL);
6196 	return (0);
6197 }
6198 
6199 
6200 int
6201 sctp_soreceive(struct socket *so,
6202     struct sockaddr **psa,
6203     struct uio *uio,
6204     struct mbuf **mp0,
6205     struct mbuf **controlp,
6206     int *flagsp)
6207 {
6208 	int error, fromlen;
6209 	uint8_t sockbuf[256];
6210 	struct sockaddr *from;
6211 	struct sctp_extrcvinfo sinfo;
6212 	int filling_sinfo = 1;
6213 	struct sctp_inpcb *inp;
6214 
6215 	inp = (struct sctp_inpcb *)so->so_pcb;
6216 	/* pickup the assoc we are reading from */
6217 	if (inp == NULL) {
6218 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6219 		return (EINVAL);
6220 	}
6221 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6222 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6223 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6224 	    (controlp == NULL)) {
6225 		/* user does not want the sndrcv ctl */
6226 		filling_sinfo = 0;
6227 	}
6228 	if (psa) {
6229 		from = (struct sockaddr *)sockbuf;
6230 		fromlen = sizeof(sockbuf);
6231 		from->sa_len = 0;
6232 	} else {
6233 		from = NULL;
6234 		fromlen = 0;
6235 	}
6236 
6237 	if (filling_sinfo) {
6238 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6239 	}
6240 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6241 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6242 	if (controlp != NULL) {
6243 		/* copy back the sinfo in a CMSG format */
6244 		if (filling_sinfo)
6245 			*controlp = sctp_build_ctl_nchunk(inp,
6246 			    (struct sctp_sndrcvinfo *)&sinfo);
6247 		else
6248 			*controlp = NULL;
6249 	}
6250 	if (psa) {
6251 		/* copy back the address info */
6252 		if (from && from->sa_len) {
6253 			*psa = sodupsockaddr(from, M_NOWAIT);
6254 		} else {
6255 			*psa = NULL;
6256 		}
6257 	}
6258 	return (error);
6259 }
6260 
6261 
6262 
6263 
6264 
6265 int
6266 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6267     int totaddr, int *error)
6268 {
6269 	int added = 0;
6270 	int i;
6271 	struct sctp_inpcb *inp;
6272 	struct sockaddr *sa;
6273 	size_t incr = 0;
6274 
6275 #ifdef INET
6276 	struct sockaddr_in *sin;
6277 
6278 #endif
6279 #ifdef INET6
6280 	struct sockaddr_in6 *sin6;
6281 
6282 #endif
6283 
6284 	sa = addr;
6285 	inp = stcb->sctp_ep;
6286 	*error = 0;
6287 	for (i = 0; i < totaddr; i++) {
6288 		switch (sa->sa_family) {
6289 #ifdef INET
6290 		case AF_INET:
6291 			incr = sizeof(struct sockaddr_in);
6292 			sin = (struct sockaddr_in *)sa;
6293 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6294 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6295 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6296 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6297 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6298 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6299 				*error = EINVAL;
6300 				goto out_now;
6301 			}
6302 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6303 			    SCTP_DONOT_SETSCOPE,
6304 			    SCTP_ADDR_IS_CONFIRMED)) {
6305 				/* assoc gone no un-lock */
6306 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6307 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6308 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6309 				*error = ENOBUFS;
6310 				goto out_now;
6311 			}
6312 			added++;
6313 			break;
6314 #endif
6315 #ifdef INET6
6316 		case AF_INET6:
6317 			incr = sizeof(struct sockaddr_in6);
6318 			sin6 = (struct sockaddr_in6 *)sa;
6319 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6320 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6321 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6322 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6323 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6324 				*error = EINVAL;
6325 				goto out_now;
6326 			}
6327 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6328 			    SCTP_DONOT_SETSCOPE,
6329 			    SCTP_ADDR_IS_CONFIRMED)) {
6330 				/* assoc gone no un-lock */
6331 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6332 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6333 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6334 				*error = ENOBUFS;
6335 				goto out_now;
6336 			}
6337 			added++;
6338 			break;
6339 #endif
6340 		default:
6341 			break;
6342 		}
6343 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6344 	}
6345 out_now:
6346 	return (added);
6347 }
6348 
6349 struct sctp_tcb *
6350 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6351     unsigned int *totaddr,
6352     unsigned int *num_v4, unsigned int *num_v6, int *error,
6353     unsigned int limit, int *bad_addr)
6354 {
6355 	struct sockaddr *sa;
6356 	struct sctp_tcb *stcb = NULL;
6357 	unsigned int incr, at, i;
6358 
6359 	at = incr = 0;
6360 	sa = addr;
6361 	*error = *num_v6 = *num_v4 = 0;
6362 	/* account and validate addresses */
6363 	for (i = 0; i < *totaddr; i++) {
6364 		switch (sa->sa_family) {
6365 #ifdef INET
6366 		case AF_INET:
6367 			if (sa->sa_len != incr) {
6368 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6369 				*error = EINVAL;
6370 				*bad_addr = 1;
6371 				return (NULL);
6372 			}
6373 			(*num_v4) += 1;
6374 			incr = (unsigned int)sizeof(struct sockaddr_in);
6375 			break;
6376 #endif
6377 #ifdef INET6
6378 		case AF_INET6:
6379 			{
6380 				struct sockaddr_in6 *sin6;
6381 
6382 				sin6 = (struct sockaddr_in6 *)sa;
6383 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6384 					/* Must be non-mapped for connectx */
6385 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6386 					*error = EINVAL;
6387 					*bad_addr = 1;
6388 					return (NULL);
6389 				}
6390 				if (sa->sa_len != incr) {
6391 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6392 					*error = EINVAL;
6393 					*bad_addr = 1;
6394 					return (NULL);
6395 				}
6396 				(*num_v6) += 1;
6397 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6398 				break;
6399 			}
6400 #endif
6401 		default:
6402 			*totaddr = i;
6403 			/* we are done */
6404 			break;
6405 		}
6406 		if (i == *totaddr) {
6407 			break;
6408 		}
6409 		SCTP_INP_INCR_REF(inp);
6410 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6411 		if (stcb != NULL) {
6412 			/* Already have or am bring up an association */
6413 			return (stcb);
6414 		} else {
6415 			SCTP_INP_DECR_REF(inp);
6416 		}
6417 		if ((at + incr) > limit) {
6418 			*totaddr = i;
6419 			break;
6420 		}
6421 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6422 	}
6423 	return ((struct sctp_tcb *)NULL);
6424 }
6425 
6426 /*
6427  * sctp_bindx(ADD) for one address.
6428  * assumes all arguments are valid/checked by caller.
6429  */
6430 void
6431 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6432     struct sockaddr *sa, sctp_assoc_t assoc_id,
6433     uint32_t vrf_id, int *error, void *p)
6434 {
6435 	struct sockaddr *addr_touse;
6436 
6437 #if defined(INET) && defined(INET6)
6438 	struct sockaddr_in sin;
6439 
6440 #endif
6441 
6442 	/* see if we're bound all already! */
6443 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6444 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6445 		*error = EINVAL;
6446 		return;
6447 	}
6448 	addr_touse = sa;
6449 #ifdef INET6
6450 	if (sa->sa_family == AF_INET6) {
6451 #ifdef INET
6452 		struct sockaddr_in6 *sin6;
6453 
6454 #endif
6455 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6456 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6457 			*error = EINVAL;
6458 			return;
6459 		}
6460 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6461 			/* can only bind v6 on PF_INET6 sockets */
6462 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6463 			*error = EINVAL;
6464 			return;
6465 		}
6466 #ifdef INET
6467 		sin6 = (struct sockaddr_in6 *)addr_touse;
6468 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6469 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6470 			    SCTP_IPV6_V6ONLY(inp)) {
6471 				/* can't bind v4-mapped on PF_INET sockets */
6472 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6473 				*error = EINVAL;
6474 				return;
6475 			}
6476 			in6_sin6_2_sin(&sin, sin6);
6477 			addr_touse = (struct sockaddr *)&sin;
6478 		}
6479 #endif
6480 	}
6481 #endif
6482 #ifdef INET
6483 	if (sa->sa_family == AF_INET) {
6484 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6485 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6486 			*error = EINVAL;
6487 			return;
6488 		}
6489 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6490 		    SCTP_IPV6_V6ONLY(inp)) {
6491 			/* can't bind v4 on PF_INET sockets */
6492 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6493 			*error = EINVAL;
6494 			return;
6495 		}
6496 	}
6497 #endif
6498 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6499 		if (p == NULL) {
6500 			/* Can't get proc for Net/Open BSD */
6501 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6502 			*error = EINVAL;
6503 			return;
6504 		}
6505 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6506 		return;
6507 	}
6508 	/*
6509 	 * No locks required here since bind and mgmt_ep_sa all do their own
6510 	 * locking. If we do something for the FIX: below we may need to
6511 	 * lock in that case.
6512 	 */
6513 	if (assoc_id == 0) {
6514 		/* add the address */
6515 		struct sctp_inpcb *lep;
6516 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6517 
6518 		/* validate the incoming port */
6519 		if ((lsin->sin_port != 0) &&
6520 		    (lsin->sin_port != inp->sctp_lport)) {
6521 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6522 			*error = EINVAL;
6523 			return;
6524 		} else {
6525 			/* user specified 0 port, set it to existing port */
6526 			lsin->sin_port = inp->sctp_lport;
6527 		}
6528 
6529 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6530 		if (lep != NULL) {
6531 			/*
6532 			 * We must decrement the refcount since we have the
6533 			 * ep already and are binding. No remove going on
6534 			 * here.
6535 			 */
6536 			SCTP_INP_DECR_REF(lep);
6537 		}
6538 		if (lep == inp) {
6539 			/* already bound to it.. ok */
6540 			return;
6541 		} else if (lep == NULL) {
6542 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6543 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6544 			    SCTP_ADD_IP_ADDRESS,
6545 			    vrf_id, NULL);
6546 		} else {
6547 			*error = EADDRINUSE;
6548 		}
6549 		if (*error)
6550 			return;
6551 	} else {
6552 		/*
6553 		 * FIX: decide whether we allow assoc based bindx
6554 		 */
6555 	}
6556 }
6557 
6558 /*
6559  * sctp_bindx(DELETE) for one address.
6560  * assumes all arguments are valid/checked by caller.
6561  */
6562 void
6563 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6564     struct sockaddr *sa, sctp_assoc_t assoc_id,
6565     uint32_t vrf_id, int *error)
6566 {
6567 	struct sockaddr *addr_touse;
6568 
6569 #if defined(INET) && defined(INET6)
6570 	struct sockaddr_in sin;
6571 
6572 #endif
6573 
6574 	/* see if we're bound all already! */
6575 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6576 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6577 		*error = EINVAL;
6578 		return;
6579 	}
6580 	addr_touse = sa;
6581 #ifdef INET6
6582 	if (sa->sa_family == AF_INET6) {
6583 #ifdef INET
6584 		struct sockaddr_in6 *sin6;
6585 
6586 #endif
6587 
6588 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6589 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6590 			*error = EINVAL;
6591 			return;
6592 		}
6593 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6594 			/* can only bind v6 on PF_INET6 sockets */
6595 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6596 			*error = EINVAL;
6597 			return;
6598 		}
6599 #ifdef INET
6600 		sin6 = (struct sockaddr_in6 *)addr_touse;
6601 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6602 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6603 			    SCTP_IPV6_V6ONLY(inp)) {
6604 				/* can't bind mapped-v4 on PF_INET sockets */
6605 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6606 				*error = EINVAL;
6607 				return;
6608 			}
6609 			in6_sin6_2_sin(&sin, sin6);
6610 			addr_touse = (struct sockaddr *)&sin;
6611 		}
6612 #endif
6613 	}
6614 #endif
6615 #ifdef INET
6616 	if (sa->sa_family == AF_INET) {
6617 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6618 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6619 			*error = EINVAL;
6620 			return;
6621 		}
6622 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6623 		    SCTP_IPV6_V6ONLY(inp)) {
6624 			/* can't bind v4 on PF_INET sockets */
6625 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6626 			*error = EINVAL;
6627 			return;
6628 		}
6629 	}
6630 #endif
6631 	/*
6632 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6633 	 * below is ever changed we may need to lock before calling
6634 	 * association level binding.
6635 	 */
6636 	if (assoc_id == 0) {
6637 		/* delete the address */
6638 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6639 		    SCTP_DEL_IP_ADDRESS,
6640 		    vrf_id, NULL);
6641 	} else {
6642 		/*
6643 		 * FIX: decide whether we allow assoc based bindx
6644 		 */
6645 	}
6646 }
6647 
6648 /*
6649  * returns the valid local address count for an assoc, taking into account
6650  * all scoping rules
6651  */
6652 int
6653 sctp_local_addr_count(struct sctp_tcb *stcb)
6654 {
6655 	int loopback_scope;
6656 
6657 #if defined(INET)
6658 	int ipv4_local_scope, ipv4_addr_legal;
6659 
6660 #endif
6661 #if defined (INET6)
6662 	int local_scope, site_scope, ipv6_addr_legal;
6663 
6664 #endif
6665 	struct sctp_vrf *vrf;
6666 	struct sctp_ifn *sctp_ifn;
6667 	struct sctp_ifa *sctp_ifa;
6668 	int count = 0;
6669 
6670 	/* Turn on all the appropriate scopes */
6671 	loopback_scope = stcb->asoc.scope.loopback_scope;
6672 #if defined(INET)
6673 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6674 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6675 #endif
6676 #if defined(INET6)
6677 	local_scope = stcb->asoc.scope.local_scope;
6678 	site_scope = stcb->asoc.scope.site_scope;
6679 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6680 #endif
6681 	SCTP_IPI_ADDR_RLOCK();
6682 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6683 	if (vrf == NULL) {
6684 		/* no vrf, no addresses */
6685 		SCTP_IPI_ADDR_RUNLOCK();
6686 		return (0);
6687 	}
6688 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6689 		/*
6690 		 * bound all case: go through all ifns on the vrf
6691 		 */
6692 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6693 			if ((loopback_scope == 0) &&
6694 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6695 				continue;
6696 			}
6697 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6698 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6699 					continue;
6700 				switch (sctp_ifa->address.sa.sa_family) {
6701 #ifdef INET
6702 				case AF_INET:
6703 					if (ipv4_addr_legal) {
6704 						struct sockaddr_in *sin;
6705 
6706 						sin = &sctp_ifa->address.sin;
6707 						if (sin->sin_addr.s_addr == 0) {
6708 							/*
6709 							 * skip unspecified
6710 							 * addrs
6711 							 */
6712 							continue;
6713 						}
6714 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6715 						    &sin->sin_addr) != 0) {
6716 							continue;
6717 						}
6718 						if ((ipv4_local_scope == 0) &&
6719 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6720 							continue;
6721 						}
6722 						/* count this one */
6723 						count++;
6724 					} else {
6725 						continue;
6726 					}
6727 					break;
6728 #endif
6729 #ifdef INET6
6730 				case AF_INET6:
6731 					if (ipv6_addr_legal) {
6732 						struct sockaddr_in6 *sin6;
6733 
6734 						sin6 = &sctp_ifa->address.sin6;
6735 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6736 							continue;
6737 						}
6738 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6739 						    &sin6->sin6_addr) != 0) {
6740 							continue;
6741 						}
6742 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6743 							if (local_scope == 0)
6744 								continue;
6745 							if (sin6->sin6_scope_id == 0) {
6746 								if (sa6_recoverscope(sin6) != 0)
6747 									/*
6748 									 *
6749 									 * bad
6750 									 *
6751 									 * li
6752 									 * nk
6753 									 *
6754 									 * loc
6755 									 * al
6756 									 *
6757 									 * add
6758 									 * re
6759 									 * ss
6760 									 * */
6761 									continue;
6762 							}
6763 						}
6764 						if ((site_scope == 0) &&
6765 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6766 							continue;
6767 						}
6768 						/* count this one */
6769 						count++;
6770 					}
6771 					break;
6772 #endif
6773 				default:
6774 					/* TSNH */
6775 					break;
6776 				}
6777 			}
6778 		}
6779 	} else {
6780 		/*
6781 		 * subset bound case
6782 		 */
6783 		struct sctp_laddr *laddr;
6784 
6785 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6786 		    sctp_nxt_addr) {
6787 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6788 				continue;
6789 			}
6790 			/* count this one */
6791 			count++;
6792 		}
6793 	}
6794 	SCTP_IPI_ADDR_RUNLOCK();
6795 	return (count);
6796 }
6797 
6798 #if defined(SCTP_LOCAL_TRACE_BUF)
6799 
6800 void
6801 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6802 {
6803 	uint32_t saveindex, newindex;
6804 
6805 	do {
6806 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6807 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6808 			newindex = 1;
6809 		} else {
6810 			newindex = saveindex + 1;
6811 		}
6812 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6813 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6814 		saveindex = 0;
6815 	}
6816 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6817 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6818 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6819 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6820 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6821 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6822 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6823 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6824 }
6825 
6826 #endif
6827 static void
6828 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6829     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6830 {
6831 	struct ip *iph;
6832 
6833 #ifdef INET6
6834 	struct ip6_hdr *ip6;
6835 
6836 #endif
6837 	struct mbuf *sp, *last;
6838 	struct udphdr *uhdr;
6839 	uint16_t port;
6840 
6841 	if ((m->m_flags & M_PKTHDR) == 0) {
6842 		/* Can't handle one that is not a pkt hdr */
6843 		goto out;
6844 	}
6845 	/* Pull the src port */
6846 	iph = mtod(m, struct ip *);
6847 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6848 	port = uhdr->uh_sport;
6849 	/*
6850 	 * Split out the mbuf chain. Leave the IP header in m, place the
6851 	 * rest in the sp.
6852 	 */
6853 	sp = m_split(m, off, M_NOWAIT);
6854 	if (sp == NULL) {
6855 		/* Gak, drop packet, we can't do a split */
6856 		goto out;
6857 	}
6858 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6859 		/* Gak, packet can't have an SCTP header in it - too small */
6860 		m_freem(sp);
6861 		goto out;
6862 	}
6863 	/* Now pull up the UDP header and SCTP header together */
6864 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6865 	if (sp == NULL) {
6866 		/* Gak pullup failed */
6867 		goto out;
6868 	}
6869 	/* Trim out the UDP header */
6870 	m_adj(sp, sizeof(struct udphdr));
6871 
6872 	/* Now reconstruct the mbuf chain */
6873 	for (last = m; last->m_next; last = last->m_next);
6874 	last->m_next = sp;
6875 	m->m_pkthdr.len += sp->m_pkthdr.len;
6876 	/*
6877 	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
6878 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
6879 	 * CSUM_SCTP_VALID this would imply that the HW also verified the
6880 	 * SCTP checksum. Therefore, clear the bit.
6881 	 */
6882 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6883 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6884 	    m->m_pkthdr.len,
6885 	    if_name(m->m_pkthdr.rcvif),
6886 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6887 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
6888 	iph = mtod(m, struct ip *);
6889 	switch (iph->ip_v) {
6890 #ifdef INET
6891 	case IPVERSION:
6892 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6893 		sctp_input_with_port(m, off, port);
6894 		break;
6895 #endif
6896 #ifdef INET6
6897 	case IPV6_VERSION >> 4:
6898 		ip6 = mtod(m, struct ip6_hdr *);
6899 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6900 		sctp6_input_with_port(&m, &off, port);
6901 		break;
6902 #endif
6903 	default:
6904 		goto out;
6905 		break;
6906 	}
6907 	return;
6908 out:
6909 	m_freem(m);
6910 }
6911 
6912 #ifdef INET
6913 static void
6914 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
6915 {
6916 	struct ip *outer_ip, *inner_ip;
6917 	struct sctphdr *sh;
6918 	struct icmp *icmp;
6919 	struct udphdr *udp;
6920 	struct sctp_inpcb *inp;
6921 	struct sctp_tcb *stcb;
6922 	struct sctp_nets *net;
6923 	struct sctp_init_chunk *ch;
6924 	struct sockaddr_in src, dst;
6925 	uint8_t type, code;
6926 
6927 	inner_ip = (struct ip *)vip;
6928 	icmp = (struct icmp *)((caddr_t)inner_ip -
6929 	    (sizeof(struct icmp) - sizeof(struct ip)));
6930 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
6931 	if (ntohs(outer_ip->ip_len) <
6932 	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
6933 		return;
6934 	}
6935 	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
6936 	sh = (struct sctphdr *)(udp + 1);
6937 	memset(&src, 0, sizeof(struct sockaddr_in));
6938 	src.sin_family = AF_INET;
6939 	src.sin_len = sizeof(struct sockaddr_in);
6940 	src.sin_port = sh->src_port;
6941 	src.sin_addr = inner_ip->ip_src;
6942 	memset(&dst, 0, sizeof(struct sockaddr_in));
6943 	dst.sin_family = AF_INET;
6944 	dst.sin_len = sizeof(struct sockaddr_in);
6945 	dst.sin_port = sh->dest_port;
6946 	dst.sin_addr = inner_ip->ip_dst;
6947 	/*
6948 	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
6949 	 * holds our local endpoint address. Thus we reverse the dst and the
6950 	 * src in the lookup.
6951 	 */
6952 	inp = NULL;
6953 	net = NULL;
6954 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
6955 	    (struct sockaddr *)&src,
6956 	    &inp, &net, 1,
6957 	    SCTP_DEFAULT_VRFID);
6958 	if ((stcb != NULL) &&
6959 	    (net != NULL) &&
6960 	    (inp != NULL)) {
6961 		/* Check the UDP port numbers */
6962 		if ((udp->uh_dport != net->port) ||
6963 		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
6964 			SCTP_TCB_UNLOCK(stcb);
6965 			return;
6966 		}
6967 		/* Check the verification tag */
6968 		if (ntohl(sh->v_tag) != 0) {
6969 			/*
6970 			 * This must be the verification tag used for
6971 			 * sending out packets. We don't consider packets
6972 			 * reflecting the verification tag.
6973 			 */
6974 			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
6975 				SCTP_TCB_UNLOCK(stcb);
6976 				return;
6977 			}
6978 		} else {
6979 			if (ntohs(outer_ip->ip_len) >=
6980 			    sizeof(struct ip) +
6981 			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
6982 				/*
6983 				 * In this case we can check if we got an
6984 				 * INIT chunk and if the initiate tag
6985 				 * matches.
6986 				 */
6987 				ch = (struct sctp_init_chunk *)(sh + 1);
6988 				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
6989 				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
6990 					SCTP_TCB_UNLOCK(stcb);
6991 					return;
6992 				}
6993 			} else {
6994 				SCTP_TCB_UNLOCK(stcb);
6995 				return;
6996 			}
6997 		}
6998 		type = icmp->icmp_type;
6999 		code = icmp->icmp_code;
7000 		if ((type == ICMP_UNREACH) &&
7001 		    (code == ICMP_UNREACH_PORT)) {
7002 			code = ICMP_UNREACH_PROTOCOL;
7003 		}
7004 		sctp_notify(inp, stcb, net, type, code,
7005 		    ntohs(inner_ip->ip_len),
7006 		    ntohs(icmp->icmp_nextmtu));
7007 	} else {
7008 		if ((stcb == NULL) && (inp != NULL)) {
7009 			/* reduce ref-count */
7010 			SCTP_INP_WLOCK(inp);
7011 			SCTP_INP_DECR_REF(inp);
7012 			SCTP_INP_WUNLOCK(inp);
7013 		}
7014 		if (stcb) {
7015 			SCTP_TCB_UNLOCK(stcb);
7016 		}
7017 	}
7018 	return;
7019 }
7020 
7021 #endif
7022 
7023 #ifdef INET6
7024 static void
7025 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
7026 {
7027 	struct ip6ctlparam *ip6cp;
7028 	struct sctp_inpcb *inp;
7029 	struct sctp_tcb *stcb;
7030 	struct sctp_nets *net;
7031 	struct sctphdr sh;
7032 	struct udphdr udp;
7033 	struct sockaddr_in6 src, dst;
7034 	uint8_t type, code;
7035 
7036 	ip6cp = (struct ip6ctlparam *)d;
7037 	/*
7038 	 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
7039 	 */
7040 	if (ip6cp->ip6c_m == NULL) {
7041 		return;
7042 	}
7043 	/*
7044 	 * Check if we can safely examine the ports and the verification tag
7045 	 * of the SCTP common header.
7046 	 */
7047 	if (ip6cp->ip6c_m->m_pkthdr.len <
7048 	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
7049 		return;
7050 	}
7051 	/* Copy out the UDP header. */
7052 	memset(&udp, 0, sizeof(struct udphdr));
7053 	m_copydata(ip6cp->ip6c_m,
7054 	    ip6cp->ip6c_off,
7055 	    sizeof(struct udphdr),
7056 	    (caddr_t)&udp);
7057 	/* Copy out the port numbers and the verification tag. */
7058 	memset(&sh, 0, sizeof(struct sctphdr));
7059 	m_copydata(ip6cp->ip6c_m,
7060 	    ip6cp->ip6c_off + sizeof(struct udphdr),
7061 	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
7062 	    (caddr_t)&sh);
7063 	memset(&src, 0, sizeof(struct sockaddr_in6));
7064 	src.sin6_family = AF_INET6;
7065 	src.sin6_len = sizeof(struct sockaddr_in6);
7066 	src.sin6_port = sh.src_port;
7067 	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
7068 	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7069 		return;
7070 	}
7071 	memset(&dst, 0, sizeof(struct sockaddr_in6));
7072 	dst.sin6_family = AF_INET6;
7073 	dst.sin6_len = sizeof(struct sockaddr_in6);
7074 	dst.sin6_port = sh.dest_port;
7075 	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7076 	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7077 		return;
7078 	}
7079 	inp = NULL;
7080 	net = NULL;
7081 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7082 	    (struct sockaddr *)&src,
7083 	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7084 	if ((stcb != NULL) &&
7085 	    (net != NULL) &&
7086 	    (inp != NULL)) {
7087 		/* Check the UDP port numbers */
7088 		if ((udp.uh_dport != net->port) ||
7089 		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7090 			SCTP_TCB_UNLOCK(stcb);
7091 			return;
7092 		}
7093 		/* Check the verification tag */
7094 		if (ntohl(sh.v_tag) != 0) {
7095 			/*
7096 			 * This must be the verification tag used for
7097 			 * sending out packets. We don't consider packets
7098 			 * reflecting the verification tag.
7099 			 */
7100 			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7101 				SCTP_TCB_UNLOCK(stcb);
7102 				return;
7103 			}
7104 		} else {
7105 			if (ip6cp->ip6c_m->m_pkthdr.len >=
7106 			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7107 			    sizeof(struct sctphdr) +
7108 			    sizeof(struct sctp_chunkhdr) +
7109 			    offsetof(struct sctp_init, a_rwnd)) {
7110 				/*
7111 				 * In this case we can check if we got an
7112 				 * INIT chunk and if the initiate tag
7113 				 * matches.
7114 				 */
7115 				uint32_t initiate_tag;
7116 				uint8_t chunk_type;
7117 
7118 				m_copydata(ip6cp->ip6c_m,
7119 				    ip6cp->ip6c_off +
7120 				    sizeof(struct udphdr) +
7121 				    sizeof(struct sctphdr),
7122 				    sizeof(uint8_t),
7123 				    (caddr_t)&chunk_type);
7124 				m_copydata(ip6cp->ip6c_m,
7125 				    ip6cp->ip6c_off +
7126 				    sizeof(struct udphdr) +
7127 				    sizeof(struct sctphdr) +
7128 				    sizeof(struct sctp_chunkhdr),
7129 				    sizeof(uint32_t),
7130 				    (caddr_t)&initiate_tag);
7131 				if ((chunk_type != SCTP_INITIATION) ||
7132 				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7133 					SCTP_TCB_UNLOCK(stcb);
7134 					return;
7135 				}
7136 			} else {
7137 				SCTP_TCB_UNLOCK(stcb);
7138 				return;
7139 			}
7140 		}
7141 		type = ip6cp->ip6c_icmp6->icmp6_type;
7142 		code = ip6cp->ip6c_icmp6->icmp6_code;
7143 		if ((type == ICMP6_DST_UNREACH) &&
7144 		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7145 			type = ICMP6_PARAM_PROB;
7146 			code = ICMP6_PARAMPROB_NEXTHEADER;
7147 		}
7148 		sctp6_notify(inp, stcb, net, type, code,
7149 		    (uint16_t) ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7150 	} else {
7151 		if ((stcb == NULL) && (inp != NULL)) {
7152 			/* reduce inp's ref-count */
7153 			SCTP_INP_WLOCK(inp);
7154 			SCTP_INP_DECR_REF(inp);
7155 			SCTP_INP_WUNLOCK(inp);
7156 		}
7157 		if (stcb) {
7158 			SCTP_TCB_UNLOCK(stcb);
7159 		}
7160 	}
7161 }
7162 
7163 #endif
7164 
7165 void
7166 sctp_over_udp_stop(void)
7167 {
7168 	/*
7169 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7170 	 * for writting!
7171 	 */
7172 #ifdef INET
7173 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7174 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7175 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7176 	}
7177 #endif
7178 #ifdef INET6
7179 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7180 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7181 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7182 	}
7183 #endif
7184 }
7185 
7186 int
7187 sctp_over_udp_start(void)
7188 {
7189 	uint16_t port;
7190 	int ret;
7191 
7192 #ifdef INET
7193 	struct sockaddr_in sin;
7194 
7195 #endif
7196 #ifdef INET6
7197 	struct sockaddr_in6 sin6;
7198 
7199 #endif
7200 	/*
7201 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7202 	 * for writting!
7203 	 */
7204 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7205 	if (ntohs(port) == 0) {
7206 		/* Must have a port set */
7207 		return (EINVAL);
7208 	}
7209 #ifdef INET
7210 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7211 		/* Already running -- must stop first */
7212 		return (EALREADY);
7213 	}
7214 #endif
7215 #ifdef INET6
7216 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7217 		/* Already running -- must stop first */
7218 		return (EALREADY);
7219 	}
7220 #endif
7221 #ifdef INET
7222 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7223 	    SOCK_DGRAM, IPPROTO_UDP,
7224 	    curthread->td_ucred, curthread))) {
7225 		sctp_over_udp_stop();
7226 		return (ret);
7227 	}
7228 	/* Call the special UDP hook. */
7229 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7230 	    sctp_recv_udp_tunneled_packet,
7231 	    sctp_recv_icmp_tunneled_packet,
7232 	    NULL))) {
7233 		sctp_over_udp_stop();
7234 		return (ret);
7235 	}
7236 	/* Ok, we have a socket, bind it to the port. */
7237 	memset(&sin, 0, sizeof(struct sockaddr_in));
7238 	sin.sin_len = sizeof(struct sockaddr_in);
7239 	sin.sin_family = AF_INET;
7240 	sin.sin_port = htons(port);
7241 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7242 	    (struct sockaddr *)&sin, curthread))) {
7243 		sctp_over_udp_stop();
7244 		return (ret);
7245 	}
7246 #endif
7247 #ifdef INET6
7248 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7249 	    SOCK_DGRAM, IPPROTO_UDP,
7250 	    curthread->td_ucred, curthread))) {
7251 		sctp_over_udp_stop();
7252 		return (ret);
7253 	}
7254 	/* Call the special UDP hook. */
7255 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7256 	    sctp_recv_udp_tunneled_packet,
7257 	    sctp_recv_icmp6_tunneled_packet,
7258 	    NULL))) {
7259 		sctp_over_udp_stop();
7260 		return (ret);
7261 	}
7262 	/* Ok, we have a socket, bind it to the port. */
7263 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7264 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7265 	sin6.sin6_family = AF_INET6;
7266 	sin6.sin6_port = htons(port);
7267 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7268 	    (struct sockaddr *)&sin6, curthread))) {
7269 		sctp_over_udp_stop();
7270 		return (ret);
7271 	}
7272 #endif
7273 	return (0);
7274 }
7275