xref: /freebsd/sys/netinet/sctputil.c (revision 8657387683946d0c03e09fe77029edfe309eeb20)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #if defined(INET6) || defined(INET)
53 #include <netinet/tcp_var.h>
54 #endif
55 #include <netinet/udp.h>
56 #include <netinet/udp_var.h>
57 #include <sys/proc.h>
58 #ifdef INET6
59 #include <netinet/icmp6.h>
60 #endif
61 
62 
63 #ifndef KTR_SCTP
64 #define KTR_SCTP KTR_SUBSYS
65 #endif
66 
67 extern const struct sctp_cc_functions sctp_cc_functions[];
68 extern const struct sctp_ss_functions sctp_ss_functions[];
69 
70 void
71 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
72 {
73 	struct sctp_cwnd_log sctp_clog;
74 
75 	sctp_clog.x.sb.stcb = stcb;
76 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
77 	if (stcb)
78 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
79 	else
80 		sctp_clog.x.sb.stcb_sbcc = 0;
81 	sctp_clog.x.sb.incr = incr;
82 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
83 	    SCTP_LOG_EVENT_SB,
84 	    from,
85 	    sctp_clog.x.misc.log1,
86 	    sctp_clog.x.misc.log2,
87 	    sctp_clog.x.misc.log3,
88 	    sctp_clog.x.misc.log4);
89 }
90 
91 void
92 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
93 {
94 	struct sctp_cwnd_log sctp_clog;
95 
96 	sctp_clog.x.close.inp = (void *)inp;
97 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
98 	if (stcb) {
99 		sctp_clog.x.close.stcb = (void *)stcb;
100 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
101 	} else {
102 		sctp_clog.x.close.stcb = 0;
103 		sctp_clog.x.close.state = 0;
104 	}
105 	sctp_clog.x.close.loc = loc;
106 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
107 	    SCTP_LOG_EVENT_CLOSE,
108 	    0,
109 	    sctp_clog.x.misc.log1,
110 	    sctp_clog.x.misc.log2,
111 	    sctp_clog.x.misc.log3,
112 	    sctp_clog.x.misc.log4);
113 }
114 
115 void
116 rto_logging(struct sctp_nets *net, int from)
117 {
118 	struct sctp_cwnd_log sctp_clog;
119 
120 	memset(&sctp_clog, 0, sizeof(sctp_clog));
121 	sctp_clog.x.rto.net = (void *)net;
122 	sctp_clog.x.rto.rtt = net->rtt / 1000;
123 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
124 	    SCTP_LOG_EVENT_RTT,
125 	    from,
126 	    sctp_clog.x.misc.log1,
127 	    sctp_clog.x.misc.log2,
128 	    sctp_clog.x.misc.log3,
129 	    sctp_clog.x.misc.log4);
130 }
131 
132 void
133 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
134 {
135 	struct sctp_cwnd_log sctp_clog;
136 
137 	sctp_clog.x.strlog.stcb = stcb;
138 	sctp_clog.x.strlog.n_tsn = tsn;
139 	sctp_clog.x.strlog.n_sseq = sseq;
140 	sctp_clog.x.strlog.e_tsn = 0;
141 	sctp_clog.x.strlog.e_sseq = 0;
142 	sctp_clog.x.strlog.strm = stream;
143 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
144 	    SCTP_LOG_EVENT_STRM,
145 	    from,
146 	    sctp_clog.x.misc.log1,
147 	    sctp_clog.x.misc.log2,
148 	    sctp_clog.x.misc.log3,
149 	    sctp_clog.x.misc.log4);
150 }
151 
152 void
153 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
154 {
155 	struct sctp_cwnd_log sctp_clog;
156 
157 	sctp_clog.x.nagle.stcb = (void *)stcb;
158 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
159 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
160 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
161 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
162 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
163 	    SCTP_LOG_EVENT_NAGLE,
164 	    action,
165 	    sctp_clog.x.misc.log1,
166 	    sctp_clog.x.misc.log2,
167 	    sctp_clog.x.misc.log3,
168 	    sctp_clog.x.misc.log4);
169 }
170 
171 void
172 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
173 {
174 	struct sctp_cwnd_log sctp_clog;
175 
176 	sctp_clog.x.sack.cumack = cumack;
177 	sctp_clog.x.sack.oldcumack = old_cumack;
178 	sctp_clog.x.sack.tsn = tsn;
179 	sctp_clog.x.sack.numGaps = gaps;
180 	sctp_clog.x.sack.numDups = dups;
181 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
182 	    SCTP_LOG_EVENT_SACK,
183 	    from,
184 	    sctp_clog.x.misc.log1,
185 	    sctp_clog.x.misc.log2,
186 	    sctp_clog.x.misc.log3,
187 	    sctp_clog.x.misc.log4);
188 }
189 
190 void
191 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
192 {
193 	struct sctp_cwnd_log sctp_clog;
194 
195 	memset(&sctp_clog, 0, sizeof(sctp_clog));
196 	sctp_clog.x.map.base = map;
197 	sctp_clog.x.map.cum = cum;
198 	sctp_clog.x.map.high = high;
199 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
200 	    SCTP_LOG_EVENT_MAP,
201 	    from,
202 	    sctp_clog.x.misc.log1,
203 	    sctp_clog.x.misc.log2,
204 	    sctp_clog.x.misc.log3,
205 	    sctp_clog.x.misc.log4);
206 }
207 
208 void
209 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
210 {
211 	struct sctp_cwnd_log sctp_clog;
212 
213 	memset(&sctp_clog, 0, sizeof(sctp_clog));
214 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
215 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
216 	sctp_clog.x.fr.tsn = tsn;
217 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
218 	    SCTP_LOG_EVENT_FR,
219 	    from,
220 	    sctp_clog.x.misc.log1,
221 	    sctp_clog.x.misc.log2,
222 	    sctp_clog.x.misc.log3,
223 	    sctp_clog.x.misc.log4);
224 }
225 
226 #ifdef SCTP_MBUF_LOGGING
227 void
228 sctp_log_mb(struct mbuf *m, int from)
229 {
230 	struct sctp_cwnd_log sctp_clog;
231 
232 	sctp_clog.x.mb.mp = m;
233 	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
234 	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
235 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
236 	if (SCTP_BUF_IS_EXTENDED(m)) {
237 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
238 		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
239 	} else {
240 		sctp_clog.x.mb.ext = 0;
241 		sctp_clog.x.mb.refcnt = 0;
242 	}
243 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
244 	    SCTP_LOG_EVENT_MBUF,
245 	    from,
246 	    sctp_clog.x.misc.log1,
247 	    sctp_clog.x.misc.log2,
248 	    sctp_clog.x.misc.log3,
249 	    sctp_clog.x.misc.log4);
250 }
251 
252 void
253 sctp_log_mbc(struct mbuf *m, int from)
254 {
255 	struct mbuf *mat;
256 
257 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
258 		sctp_log_mb(mat, from);
259 	}
260 }
261 #endif
262 
263 void
264 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
265 {
266 	struct sctp_cwnd_log sctp_clog;
267 
268 	if (control == NULL) {
269 		SCTP_PRINTF("Gak log of NULL?\n");
270 		return;
271 	}
272 	sctp_clog.x.strlog.stcb = control->stcb;
273 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
274 	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
275 	sctp_clog.x.strlog.strm = control->sinfo_stream;
276 	if (poschk != NULL) {
277 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
278 		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
279 	} else {
280 		sctp_clog.x.strlog.e_tsn = 0;
281 		sctp_clog.x.strlog.e_sseq = 0;
282 	}
283 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
284 	    SCTP_LOG_EVENT_STRM,
285 	    from,
286 	    sctp_clog.x.misc.log1,
287 	    sctp_clog.x.misc.log2,
288 	    sctp_clog.x.misc.log3,
289 	    sctp_clog.x.misc.log4);
290 }
291 
292 void
293 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
294 {
295 	struct sctp_cwnd_log sctp_clog;
296 
297 	sctp_clog.x.cwnd.net = net;
298 	if (stcb->asoc.send_queue_cnt > 255)
299 		sctp_clog.x.cwnd.cnt_in_send = 255;
300 	else
301 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
302 	if (stcb->asoc.stream_queue_cnt > 255)
303 		sctp_clog.x.cwnd.cnt_in_str = 255;
304 	else
305 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
306 
307 	if (net) {
308 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
309 		sctp_clog.x.cwnd.inflight = net->flight_size;
310 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
311 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
312 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
313 	}
314 	if (SCTP_CWNDLOG_PRESEND == from) {
315 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
316 	}
317 	sctp_clog.x.cwnd.cwnd_augment = augment;
318 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
319 	    SCTP_LOG_EVENT_CWND,
320 	    from,
321 	    sctp_clog.x.misc.log1,
322 	    sctp_clog.x.misc.log2,
323 	    sctp_clog.x.misc.log3,
324 	    sctp_clog.x.misc.log4);
325 }
326 
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Snapshot which SCTP-related locks the current thread holds
	 * (association, endpoint, create, global-info, and socket-buffer
	 * locks) and trace the result to KTR for lock-order debugging.
	 * SCTP_LOCK_UNKNOWN is recorded for locks whose owner object is
	 * not available (NULL inp/stcb/socket).
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	/* Writer-ownership of the global endpoint-info rwlock. */
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both test
		 * so_rcv.sb_mtx; this looks intentional (the socket lock
		 * maps to the receive-buffer mutex) but confirm against
		 * the socket(9) locking scheme.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
370 
371 void
372 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
373 {
374 	struct sctp_cwnd_log sctp_clog;
375 
376 	memset(&sctp_clog, 0, sizeof(sctp_clog));
377 	sctp_clog.x.cwnd.net = net;
378 	sctp_clog.x.cwnd.cwnd_new_value = error;
379 	sctp_clog.x.cwnd.inflight = net->flight_size;
380 	sctp_clog.x.cwnd.cwnd_augment = burst;
381 	if (stcb->asoc.send_queue_cnt > 255)
382 		sctp_clog.x.cwnd.cnt_in_send = 255;
383 	else
384 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
385 	if (stcb->asoc.stream_queue_cnt > 255)
386 		sctp_clog.x.cwnd.cnt_in_str = 255;
387 	else
388 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
389 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
390 	    SCTP_LOG_EVENT_MAXBURST,
391 	    from,
392 	    sctp_clog.x.misc.log1,
393 	    sctp_clog.x.misc.log2,
394 	    sctp_clog.x.misc.log3,
395 	    sctp_clog.x.misc.log4);
396 }
397 
398 void
399 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
400 {
401 	struct sctp_cwnd_log sctp_clog;
402 
403 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
404 	sctp_clog.x.rwnd.send_size = snd_size;
405 	sctp_clog.x.rwnd.overhead = overhead;
406 	sctp_clog.x.rwnd.new_rwnd = 0;
407 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
408 	    SCTP_LOG_EVENT_RWND,
409 	    from,
410 	    sctp_clog.x.misc.log1,
411 	    sctp_clog.x.misc.log2,
412 	    sctp_clog.x.misc.log3,
413 	    sctp_clog.x.misc.log4);
414 }
415 
416 void
417 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
418 {
419 	struct sctp_cwnd_log sctp_clog;
420 
421 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
422 	sctp_clog.x.rwnd.send_size = flight_size;
423 	sctp_clog.x.rwnd.overhead = overhead;
424 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
425 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
426 	    SCTP_LOG_EVENT_RWND,
427 	    from,
428 	    sctp_clog.x.misc.log1,
429 	    sctp_clog.x.misc.log2,
430 	    sctp_clog.x.misc.log3,
431 	    sctp_clog.x.misc.log4);
432 }
433 
434 #ifdef SCTP_MBCNT_LOGGING
435 static void
436 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
437 {
438 	struct sctp_cwnd_log sctp_clog;
439 
440 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
441 	sctp_clog.x.mbcnt.size_change = book;
442 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
443 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
444 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
445 	    SCTP_LOG_EVENT_MBCNT,
446 	    from,
447 	    sctp_clog.x.misc.log1,
448 	    sctp_clog.x.misc.log2,
449 	    sctp_clog.x.misc.log3,
450 	    sctp_clog.x.misc.log4);
451 }
452 #endif
453 
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	/*
	 * Trace four caller-supplied words to KTR under the generic
	 * "misc" event class; 'from' identifies the call site.
	 */
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
462 
463 void
464 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
465 {
466 	struct sctp_cwnd_log sctp_clog;
467 
468 	sctp_clog.x.wake.stcb = (void *)stcb;
469 	sctp_clog.x.wake.wake_cnt = wake_cnt;
470 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
471 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
472 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
473 
474 	if (stcb->asoc.stream_queue_cnt < 0xff)
475 		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
476 	else
477 		sctp_clog.x.wake.stream_qcnt = 0xff;
478 
479 	if (stcb->asoc.chunks_on_out_queue < 0xff)
480 		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
481 	else
482 		sctp_clog.x.wake.chunks_on_oque = 0xff;
483 
484 	sctp_clog.x.wake.sctpflags = 0;
485 	/* set in the defered mode stuff */
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
487 		sctp_clog.x.wake.sctpflags |= 1;
488 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
489 		sctp_clog.x.wake.sctpflags |= 2;
490 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
491 		sctp_clog.x.wake.sctpflags |= 4;
492 	/* what about the sb */
493 	if (stcb->sctp_socket) {
494 		struct socket *so = stcb->sctp_socket;
495 
496 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
497 	} else {
498 		sctp_clog.x.wake.sbflags = 0xff;
499 	}
500 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
501 	    SCTP_LOG_EVENT_WAKE,
502 	    from,
503 	    sctp_clog.x.misc.log1,
504 	    sctp_clog.x.misc.log2,
505 	    sctp_clog.x.misc.log3,
506 	    sctp_clog.x.misc.log4);
507 }
508 
509 void
510 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
511 {
512 	struct sctp_cwnd_log sctp_clog;
513 
514 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
515 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
516 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
517 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
518 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
519 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
520 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
521 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
522 	    SCTP_LOG_EVENT_BLOCK,
523 	    from,
524 	    sctp_clog.x.misc.log1,
525 	    sctp_clog.x.misc.log2,
526 	    sctp_clog.x.misc.log3,
527 	    sctp_clog.x.misc.log4);
528 }
529 
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/*
	 * Stub: the KTR-based trace records are extracted with ktrdump
	 * rather than copied out through this interface, so there is
	 * nothing to fill in and success (0) is always returned.
	 */
	/* May need to fix this if ktrdump does not work */
	return (0);
}
536 
537 #ifdef SCTP_AUDITING_ENABLED
538 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
539 static int sctp_audit_indx = 0;
540 
541 static
542 void
543 sctp_print_audit_report(void)
544 {
545 	int i;
546 	int cnt;
547 
548 	cnt = 0;
549 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
550 		if ((sctp_audit_data[i][0] == 0xe0) &&
551 		    (sctp_audit_data[i][1] == 0x01)) {
552 			cnt = 0;
553 			SCTP_PRINTF("\n");
554 		} else if (sctp_audit_data[i][0] == 0xf0) {
555 			cnt = 0;
556 			SCTP_PRINTF("\n");
557 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
558 		    (sctp_audit_data[i][1] == 0x01)) {
559 			SCTP_PRINTF("\n");
560 			cnt = 0;
561 		}
562 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
563 		    (uint32_t)sctp_audit_data[i][1]);
564 		cnt++;
565 		if ((cnt % 14) == 0)
566 			SCTP_PRINTF("\n");
567 	}
568 	for (i = 0; i < sctp_audit_indx; i++) {
569 		if ((sctp_audit_data[i][0] == 0xe0) &&
570 		    (sctp_audit_data[i][1] == 0x01)) {
571 			cnt = 0;
572 			SCTP_PRINTF("\n");
573 		} else if (sctp_audit_data[i][0] == 0xf0) {
574 			cnt = 0;
575 			SCTP_PRINTF("\n");
576 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
577 		    (sctp_audit_data[i][1] == 0x01)) {
578 			SCTP_PRINTF("\n");
579 			cnt = 0;
580 		}
581 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
582 		    (uint32_t)sctp_audit_data[i][1]);
583 		cnt++;
584 		if ((cnt % 14) == 0)
585 			SCTP_PRINTF("\n");
586 	}
587 	SCTP_PRINTF("\n");
588 }
589 
590 void
591 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
592     struct sctp_nets *net)
593 {
594 	int resend_cnt, tot_out, rep, tot_book_cnt;
595 	struct sctp_nets *lnet;
596 	struct sctp_tmit_chunk *chk;
597 
598 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
599 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
600 	sctp_audit_indx++;
601 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
602 		sctp_audit_indx = 0;
603 	}
604 	if (inp == NULL) {
605 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
606 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
607 		sctp_audit_indx++;
608 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
609 			sctp_audit_indx = 0;
610 		}
611 		return;
612 	}
613 	if (stcb == NULL) {
614 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
615 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
616 		sctp_audit_indx++;
617 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
618 			sctp_audit_indx = 0;
619 		}
620 		return;
621 	}
622 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
623 	sctp_audit_data[sctp_audit_indx][1] =
624 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
625 	sctp_audit_indx++;
626 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
627 		sctp_audit_indx = 0;
628 	}
629 	rep = 0;
630 	tot_book_cnt = 0;
631 	resend_cnt = tot_out = 0;
632 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
633 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
634 			resend_cnt++;
635 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
636 			tot_out += chk->book_size;
637 			tot_book_cnt++;
638 		}
639 	}
640 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
641 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
642 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
643 		sctp_audit_indx++;
644 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
645 			sctp_audit_indx = 0;
646 		}
647 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
648 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
649 		rep = 1;
650 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
651 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
652 		sctp_audit_data[sctp_audit_indx][1] =
653 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
654 		sctp_audit_indx++;
655 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
656 			sctp_audit_indx = 0;
657 		}
658 	}
659 	if (tot_out != stcb->asoc.total_flight) {
660 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
661 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
662 		sctp_audit_indx++;
663 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
664 			sctp_audit_indx = 0;
665 		}
666 		rep = 1;
667 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
668 		    (int)stcb->asoc.total_flight);
669 		stcb->asoc.total_flight = tot_out;
670 	}
671 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
672 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
673 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
674 		sctp_audit_indx++;
675 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
676 			sctp_audit_indx = 0;
677 		}
678 		rep = 1;
679 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
680 
681 		stcb->asoc.total_flight_count = tot_book_cnt;
682 	}
683 	tot_out = 0;
684 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
685 		tot_out += lnet->flight_size;
686 	}
687 	if (tot_out != stcb->asoc.total_flight) {
688 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
689 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
690 		sctp_audit_indx++;
691 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
692 			sctp_audit_indx = 0;
693 		}
694 		rep = 1;
695 		SCTP_PRINTF("real flight:%d net total was %d\n",
696 		    stcb->asoc.total_flight, tot_out);
697 		/* now corrective action */
698 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
699 
700 			tot_out = 0;
701 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
702 				if ((chk->whoTo == lnet) &&
703 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
704 					tot_out += chk->book_size;
705 				}
706 			}
707 			if (lnet->flight_size != tot_out) {
708 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
709 				    (void *)lnet, lnet->flight_size,
710 				    tot_out);
711 				lnet->flight_size = tot_out;
712 			}
713 		}
714 	}
715 	if (rep) {
716 		sctp_print_audit_report();
717 	}
718 }
719 
720 void
721 sctp_audit_log(uint8_t ev, uint8_t fd)
722 {
723 
724 	sctp_audit_data[sctp_audit_indx][0] = ev;
725 	sctp_audit_data[sctp_audit_indx][1] = fd;
726 	sctp_audit_indx++;
727 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
728 		sctp_audit_indx = 0;
729 	}
730 }
731 
732 #endif
733 
734 /*
735  * sctp_stop_timers_for_shutdown() should be called
736  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
737  * state to make sure that all timers are stopped.
738  */
739 void
740 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
741 {
742 	struct sctp_association *asoc;
743 	struct sctp_nets *net;
744 
745 	asoc = &stcb->asoc;
746 
747 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
748 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
749 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
750 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
751 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
752 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
753 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
754 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
755 	}
756 }
757 
758 /*
759  * a list of sizes based on typical mtu's, used only if next hop size not
760  * returned.
761  */
/*
 * a list of sizes based on typical mtu's, used only if next hop size not
 * returned.  Must stay sorted ascending.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Return the largest MTU smaller than val. If there is no
 * entry, just return val.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	size_t idx;

	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	idx = 1;
	while ((idx < sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0])) &&
	    (val > sctp_mtu_sizes[idx])) {
		idx++;
	}
	return (sctp_mtu_sizes[idx - 1]);
}

/*
 * Return the smallest MTU larger than val. If there is no
 * entry, just return val.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	size_t idx;

	for (idx = 0; idx < sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0]); idx++) {
		if (sctp_mtu_sizes[idx] > val) {
			return (sctp_mtu_sizes[idx]);
		}
	}
	return (val);
}
820 
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	/* Reset the consumer offset; sctp_select_initial_TSN() advances it. */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	/* Bump the counter so the next refill hashes different input. */
	m->random_counter++;
}
839 
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	/* Debug override: hand out a simple increasing sequence instead. */
	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	/*
	 * Lock-free claim of the next 4-byte slot in the random store:
	 * compute the successor offset and install it with a CAS, retrying
	 * if another thread raced us.
	 */
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* Wrap before the signature tail; assumes SCTP_SIGNATURE_SIZE is
	 * a multiple of 4 past this threshold -- TODO confirm. */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	/* Read the claimed 4 bytes as a 32-bit value. */
	p = &inp->random_store[store_at];
	xp = (uint32_t *)p;
	x = *xp;
	return (x);
}
877 
878 uint32_t
879 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
880 {
881 	uint32_t x;
882 	struct timeval now;
883 
884 	if (check) {
885 		(void)SCTP_GETTIME_TIMEVAL(&now);
886 	}
887 	for (;;) {
888 		x = sctp_select_initial_TSN(&inp->sctp_ep);
889 		if (x == 0) {
890 			/* we never use 0 */
891 			continue;
892 		}
893 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
894 			break;
895 		}
896 	}
897 	return (x);
898 }
899 
900 int32_t
901 sctp_map_assoc_state(int kernel_state)
902 {
903 	int32_t user_state;
904 
905 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
906 		user_state = SCTP_CLOSED;
907 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
908 		user_state = SCTP_SHUTDOWN_PENDING;
909 	} else {
910 		switch (kernel_state & SCTP_STATE_MASK) {
911 		case SCTP_STATE_EMPTY:
912 			user_state = SCTP_CLOSED;
913 			break;
914 		case SCTP_STATE_INUSE:
915 			user_state = SCTP_CLOSED;
916 			break;
917 		case SCTP_STATE_COOKIE_WAIT:
918 			user_state = SCTP_COOKIE_WAIT;
919 			break;
920 		case SCTP_STATE_COOKIE_ECHOED:
921 			user_state = SCTP_COOKIE_ECHOED;
922 			break;
923 		case SCTP_STATE_OPEN:
924 			user_state = SCTP_ESTABLISHED;
925 			break;
926 		case SCTP_STATE_SHUTDOWN_SENT:
927 			user_state = SCTP_SHUTDOWN_SENT;
928 			break;
929 		case SCTP_STATE_SHUTDOWN_RECEIVED:
930 			user_state = SCTP_SHUTDOWN_RECEIVED;
931 			break;
932 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
933 			user_state = SCTP_SHUTDOWN_ACK_SENT;
934 			break;
935 		default:
936 			user_state = SCTP_CLOSED;
937 			break;
938 		}
939 	}
940 	return (user_state);
941 }
942 
943 int
944 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
945     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
946 {
947 	struct sctp_association *asoc;
948 
949 	/*
950 	 * Anything set to zero is taken care of by the allocation routine's
951 	 * bzero
952 	 */
953 
954 	/*
955 	 * Up front select what scoping to apply on addresses I tell my peer
956 	 * Not sure what to do with these right now, we will need to come up
957 	 * with a way to set them. We may need to pass them through from the
958 	 * caller in the sctp_aloc_assoc() function.
959 	 */
960 	int i;
961 #if defined(SCTP_DETAILED_STR_STATS)
962 	int j;
963 #endif
964 
965 	asoc = &stcb->asoc;
966 	/* init all variables to a known value. */
967 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
968 	asoc->max_burst = inp->sctp_ep.max_burst;
969 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
970 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
971 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
972 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
973 	asoc->ecn_supported = inp->ecn_supported;
974 	asoc->prsctp_supported = inp->prsctp_supported;
975 	asoc->idata_supported = inp->idata_supported;
976 	asoc->auth_supported = inp->auth_supported;
977 	asoc->asconf_supported = inp->asconf_supported;
978 	asoc->reconfig_supported = inp->reconfig_supported;
979 	asoc->nrsack_supported = inp->nrsack_supported;
980 	asoc->pktdrop_supported = inp->pktdrop_supported;
981 	asoc->idata_supported = inp->idata_supported;
982 	asoc->sctp_cmt_pf = (uint8_t)0;
983 	asoc->sctp_frag_point = inp->sctp_frag_point;
984 	asoc->sctp_features = inp->sctp_features;
985 	asoc->default_dscp = inp->sctp_ep.default_dscp;
986 	asoc->max_cwnd = inp->max_cwnd;
987 #ifdef INET6
988 	if (inp->sctp_ep.default_flowlabel) {
989 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
990 	} else {
991 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
992 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
993 			asoc->default_flowlabel &= 0x000fffff;
994 			asoc->default_flowlabel |= 0x80000000;
995 		} else {
996 			asoc->default_flowlabel = 0;
997 		}
998 	}
999 #endif
1000 	asoc->sb_send_resv = 0;
1001 	if (override_tag) {
1002 		asoc->my_vtag = override_tag;
1003 	} else {
1004 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1005 	}
1006 	/* Get the nonce tags */
1007 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1008 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1009 	asoc->vrf_id = vrf_id;
1010 
1011 #ifdef SCTP_ASOCLOG_OF_TSNS
1012 	asoc->tsn_in_at = 0;
1013 	asoc->tsn_out_at = 0;
1014 	asoc->tsn_in_wrapped = 0;
1015 	asoc->tsn_out_wrapped = 0;
1016 	asoc->cumack_log_at = 0;
1017 	asoc->cumack_log_atsnt = 0;
1018 #endif
1019 #ifdef SCTP_FS_SPEC_LOG
1020 	asoc->fs_index = 0;
1021 #endif
1022 	asoc->refcnt = 0;
1023 	asoc->assoc_up_sent = 0;
1024 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1025 	    sctp_select_initial_TSN(&inp->sctp_ep);
1026 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	/* we are optimistic here */
1028 	asoc->peer_supports_nat = 0;
1029 	asoc->sent_queue_retran_cnt = 0;
1030 
1031 	/* for CMT */
1032 	asoc->last_net_cmt_send_started = NULL;
1033 
1034 	/* This will need to be adjusted */
1035 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1036 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1037 	asoc->asconf_seq_in = asoc->last_acked_seq;
1038 
1039 	/* here we are different, we hold the next one we expect */
1040 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1041 
1042 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1043 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1044 
1045 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1046 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1047 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1048 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1049 	asoc->free_chunk_cnt = 0;
1050 
1051 	asoc->iam_blocking = 0;
1052 	asoc->context = inp->sctp_context;
1053 	asoc->local_strreset_support = inp->local_strreset_support;
1054 	asoc->def_send = inp->def_send;
1055 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1056 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1057 	asoc->pr_sctp_cnt = 0;
1058 	asoc->total_output_queue_size = 0;
1059 
1060 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1061 		asoc->scope.ipv6_addr_legal = 1;
1062 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1063 			asoc->scope.ipv4_addr_legal = 1;
1064 		} else {
1065 			asoc->scope.ipv4_addr_legal = 0;
1066 		}
1067 	} else {
1068 		asoc->scope.ipv6_addr_legal = 0;
1069 		asoc->scope.ipv4_addr_legal = 1;
1070 	}
1071 
1072 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1073 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1074 
1075 	asoc->smallest_mtu = inp->sctp_frag_point;
1076 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1077 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1078 
1079 	asoc->stream_locked_on = 0;
1080 	asoc->ecn_echo_cnt_onq = 0;
1081 	asoc->stream_locked = 0;
1082 
1083 	asoc->send_sack = 1;
1084 
1085 	LIST_INIT(&asoc->sctp_restricted_addrs);
1086 
1087 	TAILQ_INIT(&asoc->nets);
1088 	TAILQ_INIT(&asoc->pending_reply_queue);
1089 	TAILQ_INIT(&asoc->asconf_ack_sent);
1090 	/* Setup to fill the hb random cache at first HB */
1091 	asoc->hb_random_idx = 4;
1092 
1093 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1094 
1095 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1096 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1097 
1098 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1099 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1100 
1101 	/*
1102 	 * Now the stream parameters, here we allocate space for all streams
1103 	 * that we request by default.
1104 	 */
1105 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1106 	    o_strms;
1107 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1108 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1109 	    SCTP_M_STRMO);
1110 	if (asoc->strmout == NULL) {
1111 		/* big trouble no memory */
1112 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1113 		return (ENOMEM);
1114 	}
1115 	for (i = 0; i < asoc->streamoutcnt; i++) {
1116 		/*
1117 		 * inbound side must be set to 0xffff, also NOTE when we get
1118 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1119 		 * count (streamoutcnt) but first check if we sent to any of
1120 		 * the upper streams that were dropped (if some were). Those
1121 		 * that were dropped must be notified to the upper layer as
1122 		 * failed to send.
1123 		 */
1124 		asoc->strmout[i].next_mid_ordered = 0;
1125 		asoc->strmout[i].next_mid_unordered = 0;
1126 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1127 		asoc->strmout[i].chunks_on_queues = 0;
1128 #if defined(SCTP_DETAILED_STR_STATS)
1129 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1130 			asoc->strmout[i].abandoned_sent[j] = 0;
1131 			asoc->strmout[i].abandoned_unsent[j] = 0;
1132 		}
1133 #else
1134 		asoc->strmout[i].abandoned_sent[0] = 0;
1135 		asoc->strmout[i].abandoned_unsent[0] = 0;
1136 #endif
1137 		asoc->strmout[i].sid = i;
1138 		asoc->strmout[i].last_msg_incomplete = 0;
1139 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1140 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1141 	}
1142 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1143 
1144 	/* Now the mapping array */
1145 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1146 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1147 	    SCTP_M_MAP);
1148 	if (asoc->mapping_array == NULL) {
1149 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1150 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1151 		return (ENOMEM);
1152 	}
1153 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1154 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1155 	    SCTP_M_MAP);
1156 	if (asoc->nr_mapping_array == NULL) {
1157 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1158 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1159 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1160 		return (ENOMEM);
1161 	}
1162 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1163 
1164 	/* Now the init of the other outqueues */
1165 	TAILQ_INIT(&asoc->free_chunks);
1166 	TAILQ_INIT(&asoc->control_send_queue);
1167 	TAILQ_INIT(&asoc->asconf_send_queue);
1168 	TAILQ_INIT(&asoc->send_queue);
1169 	TAILQ_INIT(&asoc->sent_queue);
1170 	TAILQ_INIT(&asoc->resetHead);
1171 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1172 	TAILQ_INIT(&asoc->asconf_queue);
1173 	/* authentication fields */
1174 	asoc->authinfo.random = NULL;
1175 	asoc->authinfo.active_keyid = 0;
1176 	asoc->authinfo.assoc_key = NULL;
1177 	asoc->authinfo.assoc_keyid = 0;
1178 	asoc->authinfo.recv_key = NULL;
1179 	asoc->authinfo.recv_keyid = 0;
1180 	LIST_INIT(&asoc->shared_keys);
1181 	asoc->marked_retrans = 0;
1182 	asoc->port = inp->sctp_ep.port;
1183 	asoc->timoinit = 0;
1184 	asoc->timodata = 0;
1185 	asoc->timosack = 0;
1186 	asoc->timoshutdown = 0;
1187 	asoc->timoheartbeat = 0;
1188 	asoc->timocookie = 0;
1189 	asoc->timoshutdownack = 0;
1190 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1191 	asoc->discontinuity_time = asoc->start_time;
1192 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1193 		asoc->abandoned_unsent[i] = 0;
1194 		asoc->abandoned_sent[i] = 0;
1195 	}
1196 	/*
1197 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1198 	 * freed later when the association is freed.
1199 	 */
1200 	return (0);
1201 }
1202 
1203 void
1204 sctp_print_mapping_array(struct sctp_association *asoc)
1205 {
1206 	unsigned int i, limit;
1207 
1208 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1209 	    asoc->mapping_array_size,
1210 	    asoc->mapping_array_base_tsn,
1211 	    asoc->cumulative_tsn,
1212 	    asoc->highest_tsn_inside_map,
1213 	    asoc->highest_tsn_inside_nr_map);
1214 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1215 		if (asoc->mapping_array[limit - 1] != 0) {
1216 			break;
1217 		}
1218 	}
1219 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1220 	for (i = 0; i < limit; i++) {
1221 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1222 	}
1223 	if (limit % 16)
1224 		SCTP_PRINTF("\n");
1225 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1226 		if (asoc->nr_mapping_array[limit - 1]) {
1227 			break;
1228 		}
1229 	}
1230 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1231 	for (i = 0; i < limit; i++) {
1232 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1233 	}
1234 	if (limit % 16)
1235 		SCTP_PRINTF("\n");
1236 }
1237 
1238 int
1239 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1240 {
1241 	/* mapping array needs to grow */
1242 	uint8_t *new_array1, *new_array2;
1243 	uint32_t new_size;
1244 
1245 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1246 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1247 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1248 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1249 		/* can't get more, forget it */
1250 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1251 		if (new_array1) {
1252 			SCTP_FREE(new_array1, SCTP_M_MAP);
1253 		}
1254 		if (new_array2) {
1255 			SCTP_FREE(new_array2, SCTP_M_MAP);
1256 		}
1257 		return (-1);
1258 	}
1259 	memset(new_array1, 0, new_size);
1260 	memset(new_array2, 0, new_size);
1261 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1262 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1263 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1264 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1265 	asoc->mapping_array = new_array1;
1266 	asoc->nr_mapping_array = new_array2;
1267 	asoc->mapping_array_size = new_size;
1268 	return (0);
1269 }
1270 
1271 
/*
 * Core of the association iterator: walk endpoints (inps) and their
 * associations, invoking the caller-supplied callbacks. Acquires the
 * INP-info read lock and the iterator lock; periodically drops and
 * re-acquires them (every SCTP_ITERATOR_MAX_AT_ONCE associations) so
 * other threads can make progress. Frees `it` when the walk completes.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	/* Drop the reference taken when the iterator was queued. */
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		/*
		 * Common exit: clear the control pointer, release locks,
		 * run the completion callback, and free the iterator.
		 */
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* First pass arrives already holding the inp read lock. */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		/* Per-endpoint callback runs once per endpoint. */
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Endpoint callback asked to skip, or no associations. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/*
			 * Pause to let others grab the lock. Hold a
			 * refcount on the stcb and a ref on the inp so
			 * neither goes away while the locks are dropped.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-acquire in lock order and drop the holds. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1421 
1422 void
1423 sctp_iterator_worker(void)
1424 {
1425 	struct sctp_iterator *it, *nit;
1426 
1427 	/* This function is called with the WQ lock in place */
1428 
1429 	sctp_it_ctl.iterator_running = 1;
1430 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1431 		/* now lets work on this one */
1432 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1433 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1434 		CURVNET_SET(it->vn);
1435 		sctp_iterator_work(it);
1436 		CURVNET_RESTORE();
1437 		SCTP_IPI_ITERATOR_WQ_LOCK();
1438 		/* sa_ignore FREED_MEMORY */
1439 	}
1440 	sctp_it_ctl.iterator_running = 0;
1441 	return;
1442 }
1443 
1444 
1445 static void
1446 sctp_handle_addr_wq(void)
1447 {
1448 	/* deal with the ADDR wq from the rtsock calls */
1449 	struct sctp_laddr *wi, *nwi;
1450 	struct sctp_asconf_iterator *asc;
1451 
1452 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1453 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1454 	if (asc == NULL) {
1455 		/* Try later, no memory */
1456 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1457 		    (struct sctp_inpcb *)NULL,
1458 		    (struct sctp_tcb *)NULL,
1459 		    (struct sctp_nets *)NULL);
1460 		return;
1461 	}
1462 	LIST_INIT(&asc->list_of_work);
1463 	asc->cnt = 0;
1464 
1465 	SCTP_WQ_ADDR_LOCK();
1466 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1467 		LIST_REMOVE(wi, sctp_nxt_addr);
1468 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1469 		asc->cnt++;
1470 	}
1471 	SCTP_WQ_ADDR_UNLOCK();
1472 
1473 	if (asc->cnt == 0) {
1474 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1475 	} else {
1476 		int ret;
1477 
1478 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1479 		    sctp_asconf_iterator_stcb,
1480 		    NULL,	/* No ep end for boundall */
1481 		    SCTP_PCB_FLAGS_BOUNDALL,
1482 		    SCTP_PCB_ANY_FEATURES,
1483 		    SCTP_ASOC_ANY_STATE,
1484 		    (void *)asc, 0,
1485 		    sctp_asconf_iterator_end, NULL, 0);
1486 		if (ret) {
1487 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1488 			/*
1489 			 * Freeing if we are stopping or put back on the
1490 			 * addr_wq.
1491 			 */
1492 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1493 				sctp_asconf_iterator_end(asc, 0);
1494 			} else {
1495 				SCTP_WQ_ADDR_LOCK();
1496 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1497 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1498 				}
1499 				SCTP_WQ_ADDR_UNLOCK();
1500 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1501 			}
1502 		}
1503 	}
1504 }
1505 
/*
 * Callout entry point for all SCTP timers. `t` is the sctp_timer
 * embedded in the endpoint/association/net it belongs to; its ep/tcb/
 * net fields are recovered here. The function validates the timer,
 * takes the references and locks the handler for this timer type
 * needs, dispatches on tmr->type, and releases everything on the way
 * out. tmr->stopped_from is updated at each checkpoint purely as a
 * debugging breadcrumb.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif
	int did_output;
	int type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Every timer type except ADDR_WQ requires an endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * If the socket is already gone, only the timer types
		 * listed below may still run (they handle teardown).
		 */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Hold the association while we validate it. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			/* Association already torn down; drop holds. */
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	type = tmr->type;
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped after firing; nothing to do. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* Take the TCB lock; the refcount above keeps it alive. */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * Safeguard: if there are chunks on the sent
			 * queue but no send timers running, something is
			 * wrong... so we start a timer on the first chunk
			 * on the sent queue, on whatever net it was sent
			 * to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/* Only re-arm HB if heartbeats are enabled on this net. */
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/* COOKIE-ECHO retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* Rotate the endpoint's cookie secret keys. */
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Guard expired: abort the association outright. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Shutdown guard timer expired");
		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Tear the association down; frees the stcb. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		/* Tear the endpoint down; frees the inp. */
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t)type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
	CURVNET_RESTORE();
}
1949 
/*
 * Start the timer of type t_type for this endpoint/association/destination.
 * Selects the sctp_timer structure and the timeout (converted to ticks)
 * based on t_type, then arms the OS timer with sctp_timeout_handler as the
 * callback.  If the selected timer is already pending it is left running
 * unchanged.  Most types require stcb (and often net) to be non-NULL and
 * silently return otherwise.
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t to_ticks;
	struct sctp_timer *tmr;

	/* Every timer type except ADDR_WQ requires an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				/* No RTT measured yet; use the initial RTO. */
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp,
		 * usually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint32_t jitter;

			if ((net->dest_state & SCTP_ADDR_NOHB) &&
			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
				/* HB disabled on a confirmed address. */
				return;
			}
			if (net->RTO == 0) {
				to_ticks = stcb->asoc.initial_rto;
			} else {
				to_ticks = net->RTO;
			}
			/* Jitter the RTO by up to +/- 50% (still in ms here). */
			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
			jitter = rndval % to_ticks;
			if (jitter >= (to_ticks >> 1)) {
				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
			} else {
				to_ticks = to_ticks - jitter;
			}
			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
			    !(net->dest_state & SCTP_ADDR_PF)) {
				to_ticks += net->heart_beat_delay;
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &net->hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was complete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here, usually about 60
		 * minutes.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU, usually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if (stcb == NULL) {
			return;
		}
		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
			/* No configured guard time: default to 5 * max RTO. */
			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
		} else {
			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		/* Note: this type requires net to be NULL. */
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __func__, t_type);
		return;
		break;
	}
	/* to_ticks is unsigned, so "<= 0" here is effectively "== 0". */
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __func__, t_type, to_ticks, (void *)tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	/* Record context for the timeout handler, then arm the timer. */
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
	tmr->vnet = (void *)curvnet;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}
2238 
/*
 * Stop the timer of type t_type for this endpoint/association/destination,
 * but only if it is the one currently occupying the (possibly shared)
 * sctp_timer structure.  'from' is recorded in tmr->stopped_from to
 * identify the caller when debugging.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Every timer type except ADDR_WQ requires an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Locate the timer structure used by this timer type. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __func__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* Keep the outstanding send-timer count from going negative. */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2395 
2396 uint32_t
2397 sctp_calculate_len(struct mbuf *m)
2398 {
2399 	uint32_t tlen = 0;
2400 	struct mbuf *at;
2401 
2402 	at = m;
2403 	while (at) {
2404 		tlen += SCTP_BUF_LEN(at);
2405 		at = SCTP_BUF_NEXT(at);
2406 	}
2407 	return (tlen);
2408 }
2409 
2410 void
2411 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2412     struct sctp_association *asoc, uint32_t mtu)
2413 {
2414 	/*
2415 	 * Reset the P-MTU size on this association, this involves changing
2416 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2417 	 * allow the DF flag to be cleared.
2418 	 */
2419 	struct sctp_tmit_chunk *chk;
2420 	unsigned int eff_mtu, ovh;
2421 
2422 	asoc->smallest_mtu = mtu;
2423 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2424 		ovh = SCTP_MIN_OVERHEAD;
2425 	} else {
2426 		ovh = SCTP_MIN_V4_OVERHEAD;
2427 	}
2428 	eff_mtu = mtu - ovh;
2429 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2430 		if (chk->send_size > eff_mtu) {
2431 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2432 		}
2433 	}
2434 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2435 		if (chk->send_size > eff_mtu) {
2436 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2437 		}
2438 	}
2439 }
2440 
2441 
2442 /*
 * given an association and the starting time of the current RTT period,
 * return the RTO in number of msecs.  net should point to the current network.
2445  */
2446 
/*
 * Compute a new retransmission timeout (in msecs) for 'net' from the RTT
 * sample that started at time 'told', updating net->rtt (usecs) and the
 * scaled SRTT/RTTVAR state (net->lastsa/net->lastsv) along the way.
 * 'safe' says whether 'told' must be copied before use (alignment on
 * sparc64); rtt_from_sack says whether the sample came from DATA/SACK or
 * from a control exchange (HB/INIT->INITACK).  Returns 0 on a bad 'safe'
 * argument.
 */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe, int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* now = now - old, i.e. the elapsed time of this RTT sample */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t)1000000 *(uint64_t)now.tv_sec +
	        (uint64_t)now.tv_usec;

	/* compute rtt in ms */
	rtt = (int32_t)(net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* Standard SRTT/RTTVAR smoothing in fixed-point. */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurement */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	/* Never let the variance collapse to zero. */
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	/* Latch satellite-network detection on the first large RTO. */
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2562 
2563 /*
2564  * return a pointer to a contiguous piece of data from the given mbuf chain
2565  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2566  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
 * is >= 'len'.  Returns NULL if there isn't 'len' bytes in the chain.
2568  */
2569 caddr_t
2570 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2571 {
2572 	uint32_t count;
2573 	uint8_t *ptr;
2574 
2575 	ptr = in_ptr;
2576 	if ((off < 0) || (len <= 0))
2577 		return (NULL);
2578 
2579 	/* find the desired start location */
2580 	while ((m != NULL) && (off > 0)) {
2581 		if (off < SCTP_BUF_LEN(m))
2582 			break;
2583 		off -= SCTP_BUF_LEN(m);
2584 		m = SCTP_BUF_NEXT(m);
2585 	}
2586 	if (m == NULL)
2587 		return (NULL);
2588 
2589 	/* is the current mbuf large enough (eg. contiguous)? */
2590 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2591 		return (mtod(m, caddr_t)+off);
2592 	} else {
2593 		/* else, it spans more than one mbuf, so save a temp copy... */
2594 		while ((m != NULL) && (len > 0)) {
2595 			count = min(SCTP_BUF_LEN(m) - off, len);
2596 			memcpy(ptr, mtod(m, caddr_t)+off, count);
2597 			len -= count;
2598 			ptr += count;
2599 			off = 0;
2600 			m = SCTP_BUF_NEXT(m);
2601 		}
2602 		if ((m == NULL) && (len > 0))
2603 			return (NULL);
2604 		else
2605 			return ((caddr_t)in_ptr);
2606 	}
2607 }
2608 
2609 
2610 
2611 struct sctp_paramhdr *
2612 sctp_get_next_param(struct mbuf *m,
2613     int offset,
2614     struct sctp_paramhdr *pull,
2615     int pull_limit)
2616 {
2617 	/* This just provides a typed signature to Peter's Pull routine */
2618 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2619 	    (uint8_t *)pull));
2620 }
2621 
2622 
2623 struct mbuf *
2624 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2625 {
2626 	struct mbuf *m_last;
2627 	caddr_t dp;
2628 
2629 	if (padlen > 3) {
2630 		return (NULL);
2631 	}
2632 	if (padlen <= M_TRAILINGSPACE(m)) {
2633 		/*
2634 		 * The easy way. We hope the majority of the time we hit
2635 		 * here :)
2636 		 */
2637 		m_last = m;
2638 	} else {
2639 		/* Hard way we must grow the mbuf chain */
2640 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2641 		if (m_last == NULL) {
2642 			return (NULL);
2643 		}
2644 		SCTP_BUF_LEN(m_last) = 0;
2645 		SCTP_BUF_NEXT(m_last) = NULL;
2646 		SCTP_BUF_NEXT(m) = m_last;
2647 	}
2648 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2649 	SCTP_BUF_LEN(m_last) += padlen;
2650 	memset(dp, 0, padlen);
2651 	return (m_last);
2652 }
2653 
2654 struct mbuf *
2655 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2656 {
2657 	/* find the last mbuf in chain and pad it */
2658 	struct mbuf *m_at;
2659 
2660 	if (last_mbuf != NULL) {
2661 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2662 	} else {
2663 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2664 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2665 				return (sctp_add_pad_tombuf(m_at, padval));
2666 			}
2667 		}
2668 	}
2669 	return (NULL);
2670 }
2671 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification to the socket's read queue
 * (when the user enabled SCTP_PCB_FLAGS_RECVASSOCEVNT) and, for 1-to-1
 * style sockets on COMM_LOST/CANT_STR_ASSOC, set so_error appropriately
 * and wake any sleepers.  'abort' (may be NULL) supplies the peer's
 * ABORT chunk to append to the notification payload.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t abort_len;
	unsigned int i;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
		} else {
			abort_len = 0;
		}
		/* Reserve room for the variable part of the notification. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			/* Fill sac_info: supported-feature list or ABORT chunk. */
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				if (stcb->asoc.idata_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			control->spec_flags = M_NOTIFICATION;
			/* not that we need this */
			control->tail_mbuf = m_notify;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				/* Peer aborted before the handshake completed. */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				/* Local abort while still in the handshake. */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
		SOCK_UNLOCK(stcb->sctp_socket);
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* Drop the TCB lock around acquiring the socket lock. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2825 
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification for address 'sa' with
 * the given state and error to the socket's read queue, if the user
 * enabled SCTP_PCB_FLAGS_RECVPADDREVNT.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address, normalizing it for the user's consumption. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			/* Present the IPv4 address as a v4-mapped IPv6 address. */
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2917 
2918 
2919 static void
2920 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2921     struct sctp_tmit_chunk *chk, int so_locked
2922 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2923     SCTP_UNUSED
2924 #endif
2925 )
2926 {
2927 	struct mbuf *m_notify;
2928 	struct sctp_send_failed *ssf;
2929 	struct sctp_send_failed_event *ssfe;
2930 	struct sctp_queued_to_read *control;
2931 	struct sctp_chunkhdr *chkhdr;
2932 	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
2933 
2934 	if ((stcb == NULL) ||
2935 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2936 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2937 		/* event not enabled */
2938 		return;
2939 	}
2940 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2941 		notifhdr_len = sizeof(struct sctp_send_failed_event);
2942 	} else {
2943 		notifhdr_len = sizeof(struct sctp_send_failed);
2944 	}
2945 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
2946 	if (m_notify == NULL)
2947 		/* no space left */
2948 		return;
2949 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
2950 	if (stcb->asoc.idata_supported) {
2951 		chkhdr_len = sizeof(struct sctp_idata_chunk);
2952 	} else {
2953 		chkhdr_len = sizeof(struct sctp_data_chunk);
2954 	}
2955 	/* Use some defaults in case we can't access the chunk header */
2956 	if (chk->send_size >= chkhdr_len) {
2957 		payload_len = chk->send_size - chkhdr_len;
2958 	} else {
2959 		payload_len = 0;
2960 	}
2961 	padding_len = 0;
2962 	if (chk->data != NULL) {
2963 		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
2964 		if (chkhdr != NULL) {
2965 			chk_len = ntohs(chkhdr->chunk_length);
2966 			if ((chk_len >= chkhdr_len) &&
2967 			    (chk->send_size >= chk_len) &&
2968 			    (chk->send_size - chk_len < 4)) {
2969 				padding_len = chk->send_size - chk_len;
2970 				payload_len = chk->send_size - chkhdr_len - padding_len;
2971 			}
2972 		}
2973 	}
2974 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2975 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2976 		memset(ssfe, 0, notifhdr_len);
2977 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2978 		if (sent) {
2979 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2980 		} else {
2981 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2982 		}
2983 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
2984 		ssfe->ssfe_error = error;
2985 		/* not exactly what the user sent in, but should be close :) */
2986 		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
2987 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2988 		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
2989 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2990 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2991 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2992 	} else {
2993 		ssf = mtod(m_notify, struct sctp_send_failed *);
2994 		memset(ssf, 0, notifhdr_len);
2995 		ssf->ssf_type = SCTP_SEND_FAILED;
2996 		if (sent) {
2997 			ssf->ssf_flags = SCTP_DATA_SENT;
2998 		} else {
2999 			ssf->ssf_flags = SCTP_DATA_UNSENT;
3000 		}
3001 		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
3002 		ssf->ssf_error = error;
3003 		/* not exactly what the user sent in, but should be close :) */
3004 		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
3005 		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
3006 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3007 		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
3008 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
3009 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3010 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3011 	}
3012 	if (chk->data != NULL) {
3013 		/* Trim off the sctp chunk header (it should be there) */
3014 		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
3015 			m_adj(chk->data, chkhdr_len);
3016 			m_adj(chk->data, -padding_len);
3017 			sctp_mbuf_crush(chk->data);
3018 			chk->send_size -= (chkhdr_len + padding_len);
3019 		}
3020 	}
3021 	SCTP_BUF_NEXT(m_notify) = chk->data;
3022 	/* Steal off the mbuf */
3023 	chk->data = NULL;
3024 	/*
3025 	 * For this case, we check the actual socket buffer, since the assoc
3026 	 * is going away we don't want to overfill the socket buffer for a
3027 	 * non-reader
3028 	 */
3029 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3030 		sctp_m_freem(m_notify);
3031 		return;
3032 	}
3033 	/* append to socket */
3034 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3035 	    0, 0, stcb->asoc.context, 0, 0, 0,
3036 	    m_notify);
3037 	if (control == NULL) {
3038 		/* no memory */
3039 		sctp_m_freem(m_notify);
3040 		return;
3041 	}
3042 	control->length = SCTP_BUF_LEN(m_notify);
3043 	control->spec_flags = M_NOTIFICATION;
3044 	/* not that we need this */
3045 	control->tail_mbuf = m_notify;
3046 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3047 	    control,
3048 	    &stcb->sctp_socket->so_rcv, 1,
3049 	    SCTP_READ_LOCK_NOT_HELD,
3050 	    so_locked);
3051 }
3052 
3053 
/*
 * Queue an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification for a
 * message that was still on the stream (pending) queue, i.e. was never
 * turned into a DATA chunk.  The user's data mbuf chain is stolen from
 * 'sp' and appended to the notification so the application can recover
 * the undelivered payload.  'error' is the reason reported to the user.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int notifhdr_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/*
	 * The newer SCTP_SEND_FAILED_EVENT and the deprecated
	 * SCTP_SEND_FAILED use different header structures; pick the size
	 * matching whichever form the user subscribed to.
	 */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* Fill the extended (RFC 6458) event form. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->sid;
		if (sp->some_taken) {
			/* part of the message already left as chunks */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		/* Fill the deprecated SCTP_SEND_FAILED form. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->sid;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	/*
	 * NOTE: sp->data is already chained onto m_notify, so dropping the
	 * notification here frees the user's data as well.
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3153 
3154 
3155 
3156 static void
3157 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3158 {
3159 	struct mbuf *m_notify;
3160 	struct sctp_adaptation_event *sai;
3161 	struct sctp_queued_to_read *control;
3162 
3163 	if ((stcb == NULL) ||
3164 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3165 		/* event not enabled */
3166 		return;
3167 	}
3168 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3169 	if (m_notify == NULL)
3170 		/* no space left */
3171 		return;
3172 	SCTP_BUF_LEN(m_notify) = 0;
3173 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3174 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3175 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3176 	sai->sai_flags = 0;
3177 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3178 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3179 	sai->sai_assoc_id = sctp_get_associd(stcb);
3180 
3181 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3182 	SCTP_BUF_NEXT(m_notify) = NULL;
3183 
3184 	/* append to socket */
3185 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3186 	    0, 0, stcb->asoc.context, 0, 0, 0,
3187 	    m_notify);
3188 	if (control == NULL) {
3189 		/* no memory */
3190 		sctp_m_freem(m_notify);
3191 		return;
3192 	}
3193 	control->length = SCTP_BUF_LEN(m_notify);
3194 	control->spec_flags = M_NOTIFICATION;
3195 	/* not that we need this */
3196 	control->tail_mbuf = m_notify;
3197 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3198 	    control,
3199 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3200 }
3201 
/* This always must be called with the read-queue LOCKED in the INP */
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT.  Unlike the other notification
 * helpers this one inserts the read-queue entry by hand (rather than via
 * sctp_add_to_readq) so that it can be placed directly after the entry
 * currently being partially delivered (asoc->control_pdapi) — hence the
 * read-queue-lock precondition above.  'val' packs the stream id in the
 * upper 16 bits and the sequence number in the lower 16.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* nobody can read the event anymore */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* unpack stream / seq from the caller-encoded value */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	/* account the notification to the socket buffer by hand */
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	control->end_added = 1;
	if (stcb->asoc.control_pdapi)
		/* slot the event right behind the message being delivered */
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Take a refcount so the stcb cannot go away while
			 * we drop its lock to acquire the socket lock in
			 * the correct order.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3294 
/*
 * Handle the ULP side of a completed SHUTDOWN: for TCP-model (and
 * connected one-to-many) sockets mark the socket as unable to send, then
 * queue an SCTP_SHUTDOWN_EVENT if the user subscribed to it.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Lock-order dance: hold a refcount, drop the stcb lock,
		 * take the socket lock, then re-take the stcb lock.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3363 
3364 static void
3365 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3366     int so_locked
3367 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3368     SCTP_UNUSED
3369 #endif
3370 )
3371 {
3372 	struct mbuf *m_notify;
3373 	struct sctp_sender_dry_event *event;
3374 	struct sctp_queued_to_read *control;
3375 
3376 	if ((stcb == NULL) ||
3377 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3378 		/* event not enabled */
3379 		return;
3380 	}
3381 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3382 	if (m_notify == NULL) {
3383 		/* no space left */
3384 		return;
3385 	}
3386 	SCTP_BUF_LEN(m_notify) = 0;
3387 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3388 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3389 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3390 	event->sender_dry_flags = 0;
3391 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3392 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3393 
3394 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3395 	SCTP_BUF_NEXT(m_notify) = NULL;
3396 
3397 	/* append to socket */
3398 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3399 	    0, 0, stcb->asoc.context, 0, 0, 0,
3400 	    m_notify);
3401 	if (control == NULL) {
3402 		/* no memory */
3403 		sctp_m_freem(m_notify);
3404 		return;
3405 	}
3406 	control->length = SCTP_BUF_LEN(m_notify);
3407 	control->spec_flags = M_NOTIFICATION;
3408 	/* not that we need this */
3409 	control->tail_mbuf = m_notify;
3410 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3411 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3412 }
3413 
3414 
3415 void
3416 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3417 {
3418 	struct mbuf *m_notify;
3419 	struct sctp_queued_to_read *control;
3420 	struct sctp_stream_change_event *stradd;
3421 
3422 	if ((stcb == NULL) ||
3423 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3424 		/* event not enabled */
3425 		return;
3426 	}
3427 	if ((stcb->asoc.peer_req_out) && flag) {
3428 		/* Peer made the request, don't tell the local user */
3429 		stcb->asoc.peer_req_out = 0;
3430 		return;
3431 	}
3432 	stcb->asoc.peer_req_out = 0;
3433 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3434 	if (m_notify == NULL)
3435 		/* no space left */
3436 		return;
3437 	SCTP_BUF_LEN(m_notify) = 0;
3438 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3439 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3440 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3441 	stradd->strchange_flags = flag;
3442 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3443 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3444 	stradd->strchange_instrms = numberin;
3445 	stradd->strchange_outstrms = numberout;
3446 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3447 	SCTP_BUF_NEXT(m_notify) = NULL;
3448 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3449 		/* no space */
3450 		sctp_m_freem(m_notify);
3451 		return;
3452 	}
3453 	/* append to socket */
3454 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3455 	    0, 0, stcb->asoc.context, 0, 0, 0,
3456 	    m_notify);
3457 	if (control == NULL) {
3458 		/* no memory */
3459 		sctp_m_freem(m_notify);
3460 		return;
3461 	}
3462 	control->length = SCTP_BUF_LEN(m_notify);
3463 	control->spec_flags = M_NOTIFICATION;
3464 	/* not that we need this */
3465 	control->tail_mbuf = m_notify;
3466 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3467 	    control,
3468 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3469 }
3470 
3471 void
3472 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3473 {
3474 	struct mbuf *m_notify;
3475 	struct sctp_queued_to_read *control;
3476 	struct sctp_assoc_reset_event *strasoc;
3477 
3478 	if ((stcb == NULL) ||
3479 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3480 		/* event not enabled */
3481 		return;
3482 	}
3483 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3484 	if (m_notify == NULL)
3485 		/* no space left */
3486 		return;
3487 	SCTP_BUF_LEN(m_notify) = 0;
3488 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3489 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3490 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3491 	strasoc->assocreset_flags = flag;
3492 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3493 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3494 	strasoc->assocreset_local_tsn = sending_tsn;
3495 	strasoc->assocreset_remote_tsn = recv_tsn;
3496 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3497 	SCTP_BUF_NEXT(m_notify) = NULL;
3498 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3499 		/* no space */
3500 		sctp_m_freem(m_notify);
3501 		return;
3502 	}
3503 	/* append to socket */
3504 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3505 	    0, 0, stcb->asoc.context, 0, 0, 0,
3506 	    m_notify);
3507 	if (control == NULL) {
3508 		/* no memory */
3509 		sctp_m_freem(m_notify);
3510 		return;
3511 	}
3512 	control->length = SCTP_BUF_LEN(m_notify);
3513 	control->spec_flags = M_NOTIFICATION;
3514 	/* not that we need this */
3515 	control->tail_mbuf = m_notify;
3516 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3517 	    control,
3518 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3519 }
3520 
3521 
3522 
3523 static void
3524 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3525     int number_entries, uint16_t *list, int flag)
3526 {
3527 	struct mbuf *m_notify;
3528 	struct sctp_queued_to_read *control;
3529 	struct sctp_stream_reset_event *strreset;
3530 	int len;
3531 
3532 	if ((stcb == NULL) ||
3533 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3534 		/* event not enabled */
3535 		return;
3536 	}
3537 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3538 	if (m_notify == NULL)
3539 		/* no space left */
3540 		return;
3541 	SCTP_BUF_LEN(m_notify) = 0;
3542 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3543 	if (len > M_TRAILINGSPACE(m_notify)) {
3544 		/* never enough room */
3545 		sctp_m_freem(m_notify);
3546 		return;
3547 	}
3548 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3549 	memset(strreset, 0, len);
3550 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3551 	strreset->strreset_flags = flag;
3552 	strreset->strreset_length = len;
3553 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3554 	if (number_entries) {
3555 		int i;
3556 
3557 		for (i = 0; i < number_entries; i++) {
3558 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3559 		}
3560 	}
3561 	SCTP_BUF_LEN(m_notify) = len;
3562 	SCTP_BUF_NEXT(m_notify) = NULL;
3563 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3564 		/* no space */
3565 		sctp_m_freem(m_notify);
3566 		return;
3567 	}
3568 	/* append to socket */
3569 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3570 	    0, 0, stcb->asoc.context, 0, 0, 0,
3571 	    m_notify);
3572 	if (control == NULL) {
3573 		/* no memory */
3574 		sctp_m_freem(m_notify);
3575 		return;
3576 	}
3577 	control->length = SCTP_BUF_LEN(m_notify);
3578 	control->spec_flags = M_NOTIFICATION;
3579 	/* not that we need this */
3580 	control->tail_mbuf = m_notify;
3581 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3582 	    control,
3583 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3584 }
3585 
3586 
/*
 * Queue an SCTP_REMOTE_ERROR notification, optionally appending a copy of
 * the peer's ERROR chunk.  If the full-size allocation fails, we retry
 * with just the base structure and report the error code without the
 * chunk data.
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t chunk_len;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		/* event not enabled */
		return;
	}
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
	} else {
		chunk_len = 0;
	}
	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	memset(sre, 0, notif_len);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	/*
	 * Only copy the chunk when the larger allocation succeeded; on the
	 * retry path notif_len equals the base size, so this guard (not a
	 * mere chunk != NULL test) is what prevents writing past the
	 * smaller buffer.
	 */
	if (notif_len > sizeof(struct sctp_remote_error)) {
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		control->spec_flags = M_NOTIFICATION;
		/* not that we need this */
		control->tail_mbuf = m_notify;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		/* no memory for the read-queue entry */
		sctp_m_freem(m_notify);
	}
}
3644 
3645 
/*
 * Central ULP notification dispatcher: translate an internal notification
 * code into the corresponding socket-API event and call the helper that
 * builds and queues it on the socket.
 *
 * 'data' is an untyped cookie whose meaning depends on 'notification'
 * (a sctp_nets, a tmit chunk, a pending send, a stream-id list, ...);
 * each case casts it as appropriate.  'so_locked' is forwarded to the
 * helpers that may need the socket lock (Apple / lock-testing builds).
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* receive side shut down; nobody would read the event */
		return;
	}
	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is delivered at most once per association */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			/* tell the user the peer does not support AUTH */
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* message failed while still on the stream (pending) queue */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			/* stream id in the upper 16 bits, seq in the lower */
			val = *((uint32_t *)data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			/* never got established: report setup failure */
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	/*
	 * For the stream-reset notifications below, 'error' carries the
	 * number of entries in the stream-id list passed via 'data'.
	 */
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	/* For AUTH notifications, 'data' carries the key id as an integer. */
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __func__, notification, notification);
		break;
	}			/* end switch */
}
3825 
/*
 * Flush every outbound DATA chunk still queued on this association and
 * notify the ULP that each datagram failed.  Walks the sent queue, the
 * send queue, and each stream's pending-send queue, freeing chunk data
 * and fixing the accounting as it goes.
 *
 * error      - error code forwarded to the SENT/UNSENT_DG_FAIL notifications.
 * holds_lock - non-zero when the caller already holds the TCB send lock.
 * so_locked  - socket-lock state, forwarded to notification/free helpers.
 *
 * Does nothing when the association is already being freed or its
 * socket is gone/closed.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		/*
		 * NOTE(review): NR-acked chunks appear to be excluded from
		 * the per-stream chunk count here — confirm against the
		 * SCTP_DATAGRAM_NR_ACKED accounting elsewhere.
		 */
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* the notification path may have consumed the data */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* the notification path may have consumed the data */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* re-check: the notification may have taken the data */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3937 
/*
 * Notify the ULP that this association was aborted.  Flags the PCB on
 * one-to-one style sockets so later socket calls can see the abort,
 * flushes all queued outbound data, and posts either a remote- or
 * local-abort notification.
 *
 * from_peer - non-zero if the ABORT came from the peer (REM_ABORTED),
 *             zero when we aborted locally (LOC_ABORTED).
 * error     - error code forwarded to the notifications.
 * abort     - the ABORT chunk (may be NULL), passed through to the ULP.
 * so_locked - socket-lock state, forwarded to helpers.
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
    struct sctp_abort_chunk *abort, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if (stcb == NULL) {
		return;
	}
	/* remember the abort on TCP-model / connected sockets */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	/* nobody left to notify if the socket is already gone */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, error, 1, so_locked);
	if (from_peer) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
	} else {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
	}
}
3967 
/*
 * Send an ABORT in response to the given packet and, when a TCB exists,
 * notify the ULP, update statistics, and free the association.
 *
 * When stcb is non-NULL, its peer vtag and VRF id override the caller's
 * vrf_id; otherwise the ABORT is sent with vtag 0.  op_err, if non-NULL,
 * is bundled as the ABORT's error cause.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	vtag = 0;
	if (stcb != NULL) {
		/* use the association's vtag and VRF, not the caller's */
		vtag = stcb->asoc.peer_vtag;
		vrf_id = stcb->asoc.vrf_id;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: drop the TCB lock while taking the
		 * socket lock, holding a refcount so the TCB stays alive.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
4014 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug aid (built only under SCTP_ASOCLOG_OF_TSNS): dump the per-
 * association inbound and outbound TSN track logs, oldest entries first
 * when a log has wrapped.
 *
 * NOTE(review): the inner guard is spelled "NOSIY_PRINTS" — apparently a
 * typo of "NOISY_PRINTS" — so the body below is never compiled in even
 * if that option is defined.  Confirm intent before renaming.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* log wrapped: print entries from the cursor to the end first */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
4075 #endif
4076 
/*
 * Abort an association we own: send an ABORT chunk to the peer, update
 * statistics, notify the ULP (unless the socket is gone), and free the
 * association.  When stcb is NULL there is nothing to abort; the inp is
 * freed instead if its socket is already gone and no associations remain.
 *
 * op_err    - optional error cause mbuf, sent in the ABORT.
 * so_locked - non-zero if the caller already holds the socket lock.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/* take the socket lock, dropping the TCB lock around it */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4137 
4138 void
4139 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4140     struct sockaddr *src, struct sockaddr *dst,
4141     struct sctphdr *sh, struct sctp_inpcb *inp,
4142     struct mbuf *cause,
4143     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4144     uint32_t vrf_id, uint16_t port)
4145 {
4146 	struct sctp_chunkhdr *ch, chunk_buf;
4147 	unsigned int chk_length;
4148 	int contains_init_chunk;
4149 
4150 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4151 	/* Generate a TO address for future reference */
4152 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4153 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4154 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4155 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4156 		}
4157 	}
4158 	contains_init_chunk = 0;
4159 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4160 	    sizeof(*ch), (uint8_t *)&chunk_buf);
4161 	while (ch != NULL) {
4162 		chk_length = ntohs(ch->chunk_length);
4163 		if (chk_length < sizeof(*ch)) {
4164 			/* break to abort land */
4165 			break;
4166 		}
4167 		switch (ch->chunk_type) {
4168 		case SCTP_INIT:
4169 			contains_init_chunk = 1;
4170 			break;
4171 		case SCTP_PACKET_DROPPED:
4172 			/* we don't respond to pkt-dropped */
4173 			return;
4174 		case SCTP_ABORT_ASSOCIATION:
4175 			/* we don't respond with an ABORT to an ABORT */
4176 			return;
4177 		case SCTP_SHUTDOWN_COMPLETE:
4178 			/*
4179 			 * we ignore it since we are not waiting for it and
4180 			 * peer is gone
4181 			 */
4182 			return;
4183 		case SCTP_SHUTDOWN_ACK:
4184 			sctp_send_shutdown_complete2(src, dst, sh,
4185 			    mflowtype, mflowid, fibnum,
4186 			    vrf_id, port);
4187 			return;
4188 		default:
4189 			break;
4190 		}
4191 		offset += SCTP_SIZE32(chk_length);
4192 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4193 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4194 	}
4195 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4196 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4197 	    (contains_init_chunk == 0))) {
4198 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4199 		    mflowtype, mflowid, fibnum,
4200 		    vrf_id, port);
4201 	}
4202 }
4203 
4204 /*
4205  * check the inbound datagram to make sure there is not an abort inside it,
4206  * if there is return 1, else return 0.
4207  */
4208 int
4209 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4210 {
4211 	struct sctp_chunkhdr *ch;
4212 	struct sctp_init_chunk *init_chk, chunk_buf;
4213 	int offset;
4214 	unsigned int chk_length;
4215 
4216 	offset = iphlen + sizeof(struct sctphdr);
4217 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4218 	    (uint8_t *)&chunk_buf);
4219 	while (ch != NULL) {
4220 		chk_length = ntohs(ch->chunk_length);
4221 		if (chk_length < sizeof(*ch)) {
4222 			/* packet is probably corrupt */
4223 			break;
4224 		}
4225 		/* we seem to be ok, is it an abort? */
4226 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4227 			/* yep, tell them */
4228 			return (1);
4229 		}
4230 		if (ch->chunk_type == SCTP_INITIATION) {
4231 			/* need to update the Vtag */
4232 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4233 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4234 			if (init_chk != NULL) {
4235 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4236 			}
4237 		}
4238 		/* Nope, move to the next chunk */
4239 		offset += SCTP_SIZE32(chk_length);
4240 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4241 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4242 	}
4243 	return (0);
4244 }
4245 
4246 /*
4247  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4248  * set (i.e. it's 0) so, create this function to compare link local scopes
4249  */
4250 #ifdef INET6
4251 uint32_t
4252 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4253 {
4254 	struct sockaddr_in6 a, b;
4255 
4256 	/* save copies */
4257 	a = *addr1;
4258 	b = *addr2;
4259 
4260 	if (a.sin6_scope_id == 0)
4261 		if (sa6_recoverscope(&a)) {
4262 			/* can't get scope, so can't match */
4263 			return (0);
4264 		}
4265 	if (b.sin6_scope_id == 0)
4266 		if (sa6_recoverscope(&b)) {
4267 			/* can't get scope, so can't match */
4268 			return (0);
4269 		}
4270 	if (a.sin6_scope_id != b.sin6_scope_id)
4271 		return (0);
4272 
4273 	return (1);
4274 }
4275 
4276 /*
4277  * returns a sockaddr_in6 with embedded scope recovered and removed
4278  */
4279 struct sockaddr_in6 *
4280 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4281 {
4282 	/* check and strip embedded scope junk */
4283 	if (addr->sin6_family == AF_INET6) {
4284 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4285 			if (addr->sin6_scope_id == 0) {
4286 				*store = *addr;
4287 				if (!sa6_recoverscope(store)) {
4288 					/* use the recovered scope */
4289 					addr = store;
4290 				}
4291 			} else {
4292 				/* else, return the original "to" addr */
4293 				in6_clearscope(&addr->sin6_addr);
4294 			}
4295 		}
4296 	}
4297 	return (addr);
4298 }
4299 #endif
4300 
4301 /*
4302  * are the two addresses the same?  currently a "scopeless" check returns: 1
4303  * if same, 0 if not
4304  */
4305 int
4306 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4307 {
4308 
4309 	/* must be valid */
4310 	if (sa1 == NULL || sa2 == NULL)
4311 		return (0);
4312 
4313 	/* must be the same family */
4314 	if (sa1->sa_family != sa2->sa_family)
4315 		return (0);
4316 
4317 	switch (sa1->sa_family) {
4318 #ifdef INET6
4319 	case AF_INET6:
4320 		{
4321 			/* IPv6 addresses */
4322 			struct sockaddr_in6 *sin6_1, *sin6_2;
4323 
4324 			sin6_1 = (struct sockaddr_in6 *)sa1;
4325 			sin6_2 = (struct sockaddr_in6 *)sa2;
4326 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4327 			    sin6_2));
4328 		}
4329 #endif
4330 #ifdef INET
4331 	case AF_INET:
4332 		{
4333 			/* IPv4 addresses */
4334 			struct sockaddr_in *sin_1, *sin_2;
4335 
4336 			sin_1 = (struct sockaddr_in *)sa1;
4337 			sin_2 = (struct sockaddr_in *)sa2;
4338 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4339 		}
4340 #endif
4341 	default:
4342 		/* we don't do these... */
4343 		return (0);
4344 	}
4345 }
4346 
4347 void
4348 sctp_print_address(struct sockaddr *sa)
4349 {
4350 #ifdef INET6
4351 	char ip6buf[INET6_ADDRSTRLEN];
4352 #endif
4353 
4354 	switch (sa->sa_family) {
4355 #ifdef INET6
4356 	case AF_INET6:
4357 		{
4358 			struct sockaddr_in6 *sin6;
4359 
4360 			sin6 = (struct sockaddr_in6 *)sa;
4361 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4362 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4363 			    ntohs(sin6->sin6_port),
4364 			    sin6->sin6_scope_id);
4365 			break;
4366 		}
4367 #endif
4368 #ifdef INET
4369 	case AF_INET:
4370 		{
4371 			struct sockaddr_in *sin;
4372 			unsigned char *p;
4373 
4374 			sin = (struct sockaddr_in *)sa;
4375 			p = (unsigned char *)&sin->sin_addr;
4376 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4377 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4378 			break;
4379 		}
4380 #endif
4381 	default:
4382 		SCTP_PRINTF("?\n");
4383 		break;
4384 	}
4385 }
4386 
/*
 * Move every read-queue control block belonging to stcb from the old
 * inp/socket to the new one (peeloff/accept path).  Socket-buffer space
 * is uncharged from the old socket and recharged on the new.
 *
 * waitflags is handed to sblock(); if the old receive buffer cannot be
 * locked the data is deliberately left in place (see comment below).
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* uncharge each mbuf from the old socket buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge each mbuf to the new socket buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4462 
/*
 * Wake up a reader on the inp's socket: either post the zero-copy event
 * (when that feature is enabled) or do a normal receive-socket wakeup.
 * On platforms that require the socket lock (Apple / lock testing) the
 * TCB lock is dropped around acquiring it, with a refcount held so the
 * TCB cannot be freed in between.
 */
void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/* ref the TCB so it can't vanish while unlocked */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* socket went away while the lock was dropped */
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4505 
/*
 * Append a control block to the inp's read queue and charge its mbuf
 * chain to the given socket buffer, then wake any reader.
 *
 * end                - non-zero marks the message complete (end_added set).
 * inp_read_lock_held - non-zero if the caller already holds the INP read lock.
 * so_locked          - socket-lock state, forwarded to the wakeup path.
 *
 * Zero-length mbufs are stripped from the chain; if the whole chain
 * collapses away the control is freed instead of queued.  If the socket
 * can no longer be read from, the control and its data are discarded.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* no reader anymore: drop the control and its data */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* notifications don't count as received user messages */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* strip empty mbufs; charge the rest to the socket buffer */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
}
4603 
4604 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4605  *************ALTERNATE ROUTING CODE
4606  */
4607 
4608 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4609  *************ALTERNATE ROUTING CODE
4610  */
4611 
4612 struct mbuf *
4613 sctp_generate_cause(uint16_t code, char *info)
4614 {
4615 	struct mbuf *m;
4616 	struct sctp_gen_error_cause *cause;
4617 	size_t info_len;
4618 	uint16_t len;
4619 
4620 	if ((code == 0) || (info == NULL)) {
4621 		return (NULL);
4622 	}
4623 	info_len = strlen(info);
4624 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4625 		return (NULL);
4626 	}
4627 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4628 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4629 	if (m != NULL) {
4630 		SCTP_BUF_LEN(m) = len;
4631 		cause = mtod(m, struct sctp_gen_error_cause *);
4632 		cause->code = htons(code);
4633 		cause->length = htons(len);
4634 		memcpy(cause->info, info, info_len);
4635 	}
4636 	return (m);
4637 }
4638 
4639 struct mbuf *
4640 sctp_generate_no_user_data_cause(uint32_t tsn)
4641 {
4642 	struct mbuf *m;
4643 	struct sctp_error_no_user_data *no_user_data_cause;
4644 	uint16_t len;
4645 
4646 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4647 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4648 	if (m != NULL) {
4649 		SCTP_BUF_LEN(m) = len;
4650 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4651 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4652 		no_user_data_cause->cause.length = htons(len);
4653 		no_user_data_cause->tsn = htonl(tsn);
4654 	}
4655 	return (m);
4656 }
4657 
4658 #ifdef SCTP_MBCNT_LOGGING
4659 void
4660 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4661     struct sctp_tmit_chunk *tp1, int chk_cnt)
4662 {
4663 	if (tp1->data == NULL) {
4664 		return;
4665 	}
4666 	asoc->chunks_on_out_queue -= chk_cnt;
4667 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4668 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4669 		    asoc->total_output_queue_size,
4670 		    tp1->book_size,
4671 		    0,
4672 		    tp1->mbcnt);
4673 	}
4674 	if (asoc->total_output_queue_size >= tp1->book_size) {
4675 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4676 	} else {
4677 		asoc->total_output_queue_size = 0;
4678 	}
4679 
4680 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4681 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4682 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4683 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4684 		} else {
4685 			stcb->sctp_socket->so_snd.sb_cc = 0;
4686 
4687 		}
4688 	}
4689 }
4690 
4691 #endif
4692 
4693 int
4694 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4695     uint8_t sent, int so_locked
4696 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4697     SCTP_UNUSED
4698 #endif
4699 )
4700 {
4701 	struct sctp_stream_out *strq;
4702 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4703 	struct sctp_stream_queue_pending *sp;
4704 	uint32_t mid;
4705 	uint16_t sid;
4706 	uint8_t foundeom = 0;
4707 	int ret_sz = 0;
4708 	int notdone;
4709 	int do_wakeup_routine = 0;
4710 
4711 	sid = tp1->rec.data.sid;
4712 	mid = tp1->rec.data.mid;
4713 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4714 		stcb->asoc.abandoned_sent[0]++;
4715 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4716 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
4717 #if defined(SCTP_DETAILED_STR_STATS)
4718 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4719 #endif
4720 	} else {
4721 		stcb->asoc.abandoned_unsent[0]++;
4722 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4723 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4724 #if defined(SCTP_DETAILED_STR_STATS)
4725 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4726 #endif
4727 	}
4728 	do {
4729 		ret_sz += tp1->book_size;
4730 		if (tp1->data != NULL) {
4731 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4732 				sctp_flight_size_decrease(tp1);
4733 				sctp_total_flight_decrease(stcb, tp1);
4734 			}
4735 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4736 			stcb->asoc.peers_rwnd += tp1->send_size;
4737 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4738 			if (sent) {
4739 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4740 			} else {
4741 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4742 			}
4743 			if (tp1->data) {
4744 				sctp_m_freem(tp1->data);
4745 				tp1->data = NULL;
4746 			}
4747 			do_wakeup_routine = 1;
4748 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4749 				stcb->asoc.sent_queue_cnt_removeable--;
4750 			}
4751 		}
4752 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4753 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4754 		    SCTP_DATA_NOT_FRAG) {
4755 			/* not frag'ed we ae done   */
4756 			notdone = 0;
4757 			foundeom = 1;
4758 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4759 			/* end of frag, we are done */
4760 			notdone = 0;
4761 			foundeom = 1;
4762 		} else {
4763 			/*
4764 			 * Its a begin or middle piece, we must mark all of
4765 			 * it
4766 			 */
4767 			notdone = 1;
4768 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4769 		}
4770 	} while (tp1 && notdone);
4771 	if (foundeom == 0) {
4772 		/*
4773 		 * The multi-part message was scattered across the send and
4774 		 * sent queue.
4775 		 */
4776 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4777 			if ((tp1->rec.data.sid != sid) ||
4778 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4779 				break;
4780 			}
4781 			/*
4782 			 * save to chk in case we have some on stream out
4783 			 * queue. If so and we have an un-transmitted one we
4784 			 * don't have to fudge the TSN.
4785 			 */
4786 			chk = tp1;
4787 			ret_sz += tp1->book_size;
4788 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4789 			if (sent) {
4790 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4791 			} else {
4792 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4793 			}
4794 			if (tp1->data) {
4795 				sctp_m_freem(tp1->data);
4796 				tp1->data = NULL;
4797 			}
4798 			/* No flight involved here book the size to 0 */
4799 			tp1->book_size = 0;
4800 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4801 				foundeom = 1;
4802 			}
4803 			do_wakeup_routine = 1;
4804 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4805 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4806 			/*
4807 			 * on to the sent queue so we can wait for it to be
4808 			 * passed by.
4809 			 */
4810 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4811 			    sctp_next);
4812 			stcb->asoc.send_queue_cnt--;
4813 			stcb->asoc.sent_queue_cnt++;
4814 		}
4815 	}
4816 	if (foundeom == 0) {
4817 		/*
4818 		 * Still no eom found. That means there is stuff left on the
4819 		 * stream out queue.. yuck.
4820 		 */
4821 		SCTP_TCB_SEND_LOCK(stcb);
4822 		strq = &stcb->asoc.strmout[sid];
4823 		sp = TAILQ_FIRST(&strq->outqueue);
4824 		if (sp != NULL) {
4825 			sp->discard_rest = 1;
4826 			/*
4827 			 * We may need to put a chunk on the queue that
4828 			 * holds the TSN that would have been sent with the
4829 			 * LAST bit.
4830 			 */
4831 			if (chk == NULL) {
4832 				/* Yep, we have to */
4833 				sctp_alloc_a_chunk(stcb, chk);
4834 				if (chk == NULL) {
4835 					/*
4836 					 * we are hosed. All we can do is
4837 					 * nothing.. which will cause an
4838 					 * abort if the peer is paying
4839 					 * attention.
4840 					 */
4841 					goto oh_well;
4842 				}
4843 				memset(chk, 0, sizeof(*chk));
4844 				chk->rec.data.rcv_flags = 0;
4845 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4846 				chk->asoc = &stcb->asoc;
4847 				if (stcb->asoc.idata_supported == 0) {
4848 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4849 						chk->rec.data.mid = 0;
4850 					} else {
4851 						chk->rec.data.mid = strq->next_mid_ordered;
4852 					}
4853 				} else {
4854 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4855 						chk->rec.data.mid = strq->next_mid_unordered;
4856 					} else {
4857 						chk->rec.data.mid = strq->next_mid_ordered;
4858 					}
4859 				}
4860 				chk->rec.data.sid = sp->sid;
4861 				chk->rec.data.ppid = sp->ppid;
4862 				chk->rec.data.context = sp->context;
4863 				chk->flags = sp->act_flags;
4864 				chk->whoTo = NULL;
4865 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4866 				strq->chunks_on_queues++;
4867 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4868 				stcb->asoc.sent_queue_cnt++;
4869 				stcb->asoc.pr_sctp_cnt++;
4870 			}
4871 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4872 			if (sp->sinfo_flags & SCTP_UNORDERED) {
4873 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4874 			}
4875 			if (stcb->asoc.idata_supported == 0) {
4876 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4877 					strq->next_mid_ordered++;
4878 				}
4879 			} else {
4880 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4881 					strq->next_mid_unordered++;
4882 				} else {
4883 					strq->next_mid_ordered++;
4884 				}
4885 			}
4886 	oh_well:
4887 			if (sp->data) {
4888 				/*
4889 				 * Pull any data to free up the SB and allow
4890 				 * sender to "add more" while we will throw
4891 				 * away :-)
4892 				 */
4893 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4894 				ret_sz += sp->length;
4895 				do_wakeup_routine = 1;
4896 				sp->some_taken = 1;
4897 				sctp_m_freem(sp->data);
4898 				sp->data = NULL;
4899 				sp->tail_mbuf = NULL;
4900 				sp->length = 0;
4901 			}
4902 		}
4903 		SCTP_TCB_SEND_UNLOCK(stcb);
4904 	}
4905 	if (do_wakeup_routine) {
4906 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4907 		struct socket *so;
4908 
4909 		so = SCTP_INP_SO(stcb->sctp_ep);
4910 		if (!so_locked) {
4911 			atomic_add_int(&stcb->asoc.refcnt, 1);
4912 			SCTP_TCB_UNLOCK(stcb);
4913 			SCTP_SOCKET_LOCK(so, 1);
4914 			SCTP_TCB_LOCK(stcb);
4915 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4916 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4917 				/* assoc was freed while we were unlocked */
4918 				SCTP_SOCKET_UNLOCK(so, 1);
4919 				return (ret_sz);
4920 			}
4921 		}
4922 #endif
4923 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4924 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4925 		if (!so_locked) {
4926 			SCTP_SOCKET_UNLOCK(so, 1);
4927 		}
4928 #endif
4929 	}
4930 	return (ret_sz);
4931 }
4932 
4933 /*
4934  * checks to see if the given address, sa, is one that is currently known by
4935  * the kernel note: can't distinguish the same address on multiple interfaces
4936  * and doesn't handle multiple addresses with different zone/scope id's note:
4937  * ifa_ifwithaddr() compares the entire sockaddr struct
4938  */
4939 struct sctp_ifa *
4940 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4941     int holds_lock)
4942 {
4943 	struct sctp_laddr *laddr;
4944 
4945 	if (holds_lock == 0) {
4946 		SCTP_INP_RLOCK(inp);
4947 	}
4948 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4949 		if (laddr->ifa == NULL)
4950 			continue;
4951 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4952 			continue;
4953 #ifdef INET
4954 		if (addr->sa_family == AF_INET) {
4955 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4956 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4957 				/* found him. */
4958 				if (holds_lock == 0) {
4959 					SCTP_INP_RUNLOCK(inp);
4960 				}
4961 				return (laddr->ifa);
4962 				break;
4963 			}
4964 		}
4965 #endif
4966 #ifdef INET6
4967 		if (addr->sa_family == AF_INET6) {
4968 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4969 			    &laddr->ifa->address.sin6)) {
4970 				/* found him. */
4971 				if (holds_lock == 0) {
4972 					SCTP_INP_RUNLOCK(inp);
4973 				}
4974 				return (laddr->ifa);
4975 				break;
4976 			}
4977 		}
4978 #endif
4979 	}
4980 	if (holds_lock == 0) {
4981 		SCTP_INP_RUNLOCK(inp);
4982 	}
4983 	return (NULL);
4984 }
4985 
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	/*
	 * Fold the network portion of the address down to a 32-bit hash
	 * value; unknown address families hash to 0.
	 */
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;
			uint32_t v4;

			sin = (struct sockaddr_in *)addr;
			v4 = sin->sin_addr.s_addr;
			/* XOR-fold the upper half word into the lower. */
			return (v4 ^ (v4 >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t h;

			sin6 = (struct sockaddr_in6 *)addr;
			/* Sum the four 32-bit words, then XOR-fold. */
			h = sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3];
			return (h ^ (h >> 16));
		}
#endif
	default:
		break;
	}
	return (0);
}
5019 
5020 struct sctp_ifa *
5021 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5022 {
5023 	struct sctp_ifa *sctp_ifap;
5024 	struct sctp_vrf *vrf;
5025 	struct sctp_ifalist *hash_head;
5026 	uint32_t hash_of_addr;
5027 
5028 	if (holds_lock == 0)
5029 		SCTP_IPI_ADDR_RLOCK();
5030 
5031 	vrf = sctp_find_vrf(vrf_id);
5032 	if (vrf == NULL) {
5033 		if (holds_lock == 0)
5034 			SCTP_IPI_ADDR_RUNLOCK();
5035 		return (NULL);
5036 	}
5037 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5038 
5039 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5040 	if (hash_head == NULL) {
5041 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5042 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5043 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5044 		sctp_print_address(addr);
5045 		SCTP_PRINTF("No such bucket for address\n");
5046 		if (holds_lock == 0)
5047 			SCTP_IPI_ADDR_RUNLOCK();
5048 
5049 		return (NULL);
5050 	}
5051 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5052 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5053 			continue;
5054 #ifdef INET
5055 		if (addr->sa_family == AF_INET) {
5056 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5057 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5058 				/* found him. */
5059 				if (holds_lock == 0)
5060 					SCTP_IPI_ADDR_RUNLOCK();
5061 				return (sctp_ifap);
5062 				break;
5063 			}
5064 		}
5065 #endif
5066 #ifdef INET6
5067 		if (addr->sa_family == AF_INET6) {
5068 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5069 			    &sctp_ifap->address.sin6)) {
5070 				/* found him. */
5071 				if (holds_lock == 0)
5072 					SCTP_IPI_ADDR_RUNLOCK();
5073 				return (sctp_ifap);
5074 				break;
5075 			}
5076 		}
5077 #endif
5078 	}
5079 	if (holds_lock == 0)
5080 		SCTP_IPI_ADDR_RUNLOCK();
5081 	return (NULL);
5082 }
5083 
/*
 * Called after the user has pulled data off the socket: decide whether the
 * receive window has opened up enough (by at least rwnd_req bytes) to be
 * worth sending a window-update SACK now, and if so send it and kick chunk
 * output.  freed_so_far is consumed (added to the tcb's running total and
 * reset to 0).  hold_rlock says whether the caller holds the INP read lock;
 * it is temporarily dropped around the TCB-locked SACK path and re-taken
 * before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the assoc cannot be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Pin the endpoint as well while we look at its flags/socket. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed byte count into the tcb's running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		/* dif = how much the window has grown since last reported. */
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * Worth reporting: drop the INP read lock (if held) before
		 * taking the TCB lock to send the window-update SACK.
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock; the state may have changed. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-take the INP read lock if we dropped it for the caller. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5166 
5167 int
5168 sctp_sorecvmsg(struct socket *so,
5169     struct uio *uio,
5170     struct mbuf **mp,
5171     struct sockaddr *from,
5172     int fromlen,
5173     int *msg_flags,
5174     struct sctp_sndrcvinfo *sinfo,
5175     int filling_sinfo)
5176 {
5177 	/*
5178 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5179 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5180 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5181 	 * On the way out we may send out any combination of:
5182 	 * MSG_NOTIFICATION MSG_EOR
5183 	 *
5184 	 */
5185 	struct sctp_inpcb *inp = NULL;
5186 	int my_len = 0;
5187 	int cp_len = 0, error = 0;
5188 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5189 	struct mbuf *m = NULL;
5190 	struct sctp_tcb *stcb = NULL;
5191 	int wakeup_read_socket = 0;
5192 	int freecnt_applied = 0;
5193 	int out_flags = 0, in_flags = 0;
5194 	int block_allowed = 1;
5195 	uint32_t freed_so_far = 0;
5196 	uint32_t copied_so_far = 0;
5197 	int in_eeor_mode = 0;
5198 	int no_rcv_needed = 0;
5199 	uint32_t rwnd_req = 0;
5200 	int hold_sblock = 0;
5201 	int hold_rlock = 0;
5202 	ssize_t slen = 0;
5203 	uint32_t held_length = 0;
5204 	int sockbuf_lock = 0;
5205 
5206 	if (uio == NULL) {
5207 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5208 		return (EINVAL);
5209 	}
5210 	if (msg_flags) {
5211 		in_flags = *msg_flags;
5212 		if (in_flags & MSG_PEEK)
5213 			SCTP_STAT_INCR(sctps_read_peeks);
5214 	} else {
5215 		in_flags = 0;
5216 	}
5217 	slen = uio->uio_resid;
5218 
5219 	/* Pull in and set up our int flags */
5220 	if (in_flags & MSG_OOB) {
5221 		/* Out of band's NOT supported */
5222 		return (EOPNOTSUPP);
5223 	}
5224 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5225 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5226 		return (EINVAL);
5227 	}
5228 	if ((in_flags & (MSG_DONTWAIT
5229 	    | MSG_NBIO
5230 	    )) ||
5231 	    SCTP_SO_IS_NBIO(so)) {
5232 		block_allowed = 0;
5233 	}
5234 	/* setup the endpoint */
5235 	inp = (struct sctp_inpcb *)so->so_pcb;
5236 	if (inp == NULL) {
5237 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5238 		return (EFAULT);
5239 	}
5240 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5241 	/* Must be at least a MTU's worth */
5242 	if (rwnd_req < SCTP_MIN_RWND)
5243 		rwnd_req = SCTP_MIN_RWND;
5244 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5245 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5246 		sctp_misc_ints(SCTP_SORECV_ENTER,
5247 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5248 	}
5249 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5250 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5251 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5252 	}
5253 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5254 	if (error) {
5255 		goto release_unlocked;
5256 	}
5257 	sockbuf_lock = 1;
5258 restart:
5259 
5260 
5261 restart_nosblocks:
5262 	if (hold_sblock == 0) {
5263 		SOCKBUF_LOCK(&so->so_rcv);
5264 		hold_sblock = 1;
5265 	}
5266 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5267 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5268 		goto out;
5269 	}
5270 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5271 		if (so->so_error) {
5272 			error = so->so_error;
5273 			if ((in_flags & MSG_PEEK) == 0)
5274 				so->so_error = 0;
5275 			goto out;
5276 		} else {
5277 			if (so->so_rcv.sb_cc == 0) {
5278 				/* indicate EOF */
5279 				error = 0;
5280 				goto out;
5281 			}
5282 		}
5283 	}
5284 	if (so->so_rcv.sb_cc <= held_length) {
5285 		if (so->so_error) {
5286 			error = so->so_error;
5287 			if ((in_flags & MSG_PEEK) == 0) {
5288 				so->so_error = 0;
5289 			}
5290 			goto out;
5291 		}
5292 		if ((so->so_rcv.sb_cc == 0) &&
5293 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5294 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5295 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5296 				/*
5297 				 * For active open side clear flags for
5298 				 * re-use passive open is blocked by
5299 				 * connect.
5300 				 */
5301 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5302 					/*
5303 					 * You were aborted, passive side
5304 					 * always hits here
5305 					 */
5306 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5307 					error = ECONNRESET;
5308 				}
5309 				so->so_state &= ~(SS_ISCONNECTING |
5310 				    SS_ISDISCONNECTING |
5311 				    SS_ISCONFIRMING |
5312 				    SS_ISCONNECTED);
5313 				if (error == 0) {
5314 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5315 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5316 						error = ENOTCONN;
5317 					}
5318 				}
5319 				goto out;
5320 			}
5321 		}
5322 		if (block_allowed) {
5323 			error = sbwait(&so->so_rcv);
5324 			if (error) {
5325 				goto out;
5326 			}
5327 			held_length = 0;
5328 			goto restart_nosblocks;
5329 		} else {
5330 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5331 			error = EWOULDBLOCK;
5332 			goto out;
5333 		}
5334 	}
5335 	if (hold_sblock == 1) {
5336 		SOCKBUF_UNLOCK(&so->so_rcv);
5337 		hold_sblock = 0;
5338 	}
5339 	/* we possibly have data we can read */
5340 	/* sa_ignore FREED_MEMORY */
5341 	control = TAILQ_FIRST(&inp->read_queue);
5342 	if (control == NULL) {
5343 		/*
5344 		 * This could be happening since the appender did the
5345 		 * increment but as not yet did the tailq insert onto the
5346 		 * read_queue
5347 		 */
5348 		if (hold_rlock == 0) {
5349 			SCTP_INP_READ_LOCK(inp);
5350 		}
5351 		control = TAILQ_FIRST(&inp->read_queue);
5352 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5353 #ifdef INVARIANTS
5354 			panic("Huh, its non zero and nothing on control?");
5355 #endif
5356 			so->so_rcv.sb_cc = 0;
5357 		}
5358 		SCTP_INP_READ_UNLOCK(inp);
5359 		hold_rlock = 0;
5360 		goto restart;
5361 	}
5362 	if ((control->length == 0) &&
5363 	    (control->do_not_ref_stcb)) {
5364 		/*
5365 		 * Clean up code for freeing assoc that left behind a
5366 		 * pdapi.. maybe a peer in EEOR that just closed after
5367 		 * sending and never indicated a EOR.
5368 		 */
5369 		if (hold_rlock == 0) {
5370 			hold_rlock = 1;
5371 			SCTP_INP_READ_LOCK(inp);
5372 		}
5373 		control->held_length = 0;
5374 		if (control->data) {
5375 			/* Hmm there is data here .. fix */
5376 			struct mbuf *m_tmp;
5377 			int cnt = 0;
5378 
5379 			m_tmp = control->data;
5380 			while (m_tmp) {
5381 				cnt += SCTP_BUF_LEN(m_tmp);
5382 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5383 					control->tail_mbuf = m_tmp;
5384 					control->end_added = 1;
5385 				}
5386 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5387 			}
5388 			control->length = cnt;
5389 		} else {
5390 			/* remove it */
5391 			TAILQ_REMOVE(&inp->read_queue, control, next);
5392 			/* Add back any hiddend data */
5393 			sctp_free_remote_addr(control->whoFrom);
5394 			sctp_free_a_readq(stcb, control);
5395 		}
5396 		if (hold_rlock) {
5397 			hold_rlock = 0;
5398 			SCTP_INP_READ_UNLOCK(inp);
5399 		}
5400 		goto restart;
5401 	}
5402 	if ((control->length == 0) &&
5403 	    (control->end_added == 1)) {
5404 		/*
5405 		 * Do we also need to check for (control->pdapi_aborted ==
5406 		 * 1)?
5407 		 */
5408 		if (hold_rlock == 0) {
5409 			hold_rlock = 1;
5410 			SCTP_INP_READ_LOCK(inp);
5411 		}
5412 		TAILQ_REMOVE(&inp->read_queue, control, next);
5413 		if (control->data) {
5414 #ifdef INVARIANTS
5415 			panic("control->data not null but control->length == 0");
5416 #else
5417 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5418 			sctp_m_freem(control->data);
5419 			control->data = NULL;
5420 #endif
5421 		}
5422 		if (control->aux_data) {
5423 			sctp_m_free(control->aux_data);
5424 			control->aux_data = NULL;
5425 		}
5426 #ifdef INVARIANTS
5427 		if (control->on_strm_q) {
5428 			panic("About to free ctl:%p so:%p and its in %d",
5429 			    control, so, control->on_strm_q);
5430 		}
5431 #endif
5432 		sctp_free_remote_addr(control->whoFrom);
5433 		sctp_free_a_readq(stcb, control);
5434 		if (hold_rlock) {
5435 			hold_rlock = 0;
5436 			SCTP_INP_READ_UNLOCK(inp);
5437 		}
5438 		goto restart;
5439 	}
5440 	if (control->length == 0) {
5441 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5442 		    (filling_sinfo)) {
5443 			/* find a more suitable one then this */
5444 			ctl = TAILQ_NEXT(control, next);
5445 			while (ctl) {
5446 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5447 				    (ctl->some_taken ||
5448 				    (ctl->spec_flags & M_NOTIFICATION) ||
5449 				    ((ctl->do_not_ref_stcb == 0) &&
5450 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5451 				    ) {
5452 					/*-
5453 					 * If we have a different TCB next, and there is data
5454 					 * present. If we have already taken some (pdapi), OR we can
5455 					 * ref the tcb and no delivery as started on this stream, we
5456 					 * take it. Note we allow a notification on a different
5457 					 * assoc to be delivered..
5458 					 */
5459 					control = ctl;
5460 					goto found_one;
5461 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5462 					    (ctl->length) &&
5463 					    ((ctl->some_taken) ||
5464 					    ((ctl->do_not_ref_stcb == 0) &&
5465 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5466 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5467 					/*-
5468 					 * If we have the same tcb, and there is data present, and we
5469 					 * have the strm interleave feature present. Then if we have
5470 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5471 					 * not started a delivery for this stream, we can take it.
5472 					 * Note we do NOT allow a notificaiton on the same assoc to
5473 					 * be delivered.
5474 					 */
5475 					control = ctl;
5476 					goto found_one;
5477 				}
5478 				ctl = TAILQ_NEXT(ctl, next);
5479 			}
5480 		}
5481 		/*
5482 		 * if we reach here, not suitable replacement is available
5483 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5484 		 * into the our held count, and its time to sleep again.
5485 		 */
5486 		held_length = so->so_rcv.sb_cc;
5487 		control->held_length = so->so_rcv.sb_cc;
5488 		goto restart;
5489 	}
5490 	/* Clear the held length since there is something to read */
5491 	control->held_length = 0;
5492 found_one:
5493 	/*
5494 	 * If we reach here, control has a some data for us to read off.
5495 	 * Note that stcb COULD be NULL.
5496 	 */
5497 	if (hold_rlock == 0) {
5498 		hold_rlock = 1;
5499 		SCTP_INP_READ_LOCK(inp);
5500 	}
5501 	control->some_taken++;
5502 	stcb = control->stcb;
5503 	if (stcb) {
5504 		if ((control->do_not_ref_stcb == 0) &&
5505 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5506 			if (freecnt_applied == 0)
5507 				stcb = NULL;
5508 		} else if (control->do_not_ref_stcb == 0) {
5509 			/* you can't free it on me please */
5510 			/*
5511 			 * The lock on the socket buffer protects us so the
5512 			 * free code will stop. But since we used the
5513 			 * socketbuf lock and the sender uses the tcb_lock
5514 			 * to increment, we need to use the atomic add to
5515 			 * the refcnt
5516 			 */
5517 			if (freecnt_applied) {
5518 #ifdef INVARIANTS
5519 				panic("refcnt already incremented");
5520 #else
5521 				SCTP_PRINTF("refcnt already incremented?\n");
5522 #endif
5523 			} else {
5524 				atomic_add_int(&stcb->asoc.refcnt, 1);
5525 				freecnt_applied = 1;
5526 			}
5527 			/*
5528 			 * Setup to remember how much we have not yet told
5529 			 * the peer our rwnd has opened up. Note we grab the
5530 			 * value from the tcb from last time. Note too that
5531 			 * sack sending clears this when a sack is sent,
5532 			 * which is fine. Once we hit the rwnd_req, we then
5533 			 * will go to the sctp_user_rcvd() that will not
5534 			 * lock until it KNOWs it MUST send a WUP-SACK.
5535 			 */
5536 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5537 			stcb->freed_by_sorcv_sincelast = 0;
5538 		}
5539 	}
5540 	if (stcb &&
5541 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5542 	    control->do_not_ref_stcb == 0) {
5543 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5544 	}
5545 	/* First lets get off the sinfo and sockaddr info */
5546 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5547 		sinfo->sinfo_stream = control->sinfo_stream;
5548 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5549 		sinfo->sinfo_flags = control->sinfo_flags;
5550 		sinfo->sinfo_ppid = control->sinfo_ppid;
5551 		sinfo->sinfo_context = control->sinfo_context;
5552 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5553 		sinfo->sinfo_tsn = control->sinfo_tsn;
5554 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5555 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5556 		nxt = TAILQ_NEXT(control, next);
5557 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5558 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5559 			struct sctp_extrcvinfo *s_extra;
5560 
5561 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5562 			if ((nxt) &&
5563 			    (nxt->length)) {
5564 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5565 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5566 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5567 				}
5568 				if (nxt->spec_flags & M_NOTIFICATION) {
5569 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5570 				}
5571 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5572 				s_extra->serinfo_next_length = nxt->length;
5573 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5574 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5575 				if (nxt->tail_mbuf != NULL) {
5576 					if (nxt->end_added) {
5577 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5578 					}
5579 				}
5580 			} else {
5581 				/*
5582 				 * we explicitly 0 this, since the memcpy
5583 				 * got some other things beyond the older
5584 				 * sinfo_ that is on the control's structure
5585 				 * :-D
5586 				 */
5587 				nxt = NULL;
5588 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5589 				s_extra->serinfo_next_aid = 0;
5590 				s_extra->serinfo_next_length = 0;
5591 				s_extra->serinfo_next_ppid = 0;
5592 				s_extra->serinfo_next_stream = 0;
5593 			}
5594 		}
5595 		/*
5596 		 * update off the real current cum-ack, if we have an stcb.
5597 		 */
5598 		if ((control->do_not_ref_stcb == 0) && stcb)
5599 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5600 		/*
5601 		 * mask off the high bits, we keep the actual chunk bits in
5602 		 * there.
5603 		 */
5604 		sinfo->sinfo_flags &= 0x00ff;
5605 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5606 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5607 		}
5608 	}
5609 #ifdef SCTP_ASOCLOG_OF_TSNS
5610 	{
5611 		int index, newindex;
5612 		struct sctp_pcbtsn_rlog *entry;
5613 
5614 		do {
5615 			index = inp->readlog_index;
5616 			newindex = index + 1;
5617 			if (newindex >= SCTP_READ_LOG_SIZE) {
5618 				newindex = 0;
5619 			}
5620 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5621 		entry = &inp->readlog[index];
5622 		entry->vtag = control->sinfo_assoc_id;
5623 		entry->strm = control->sinfo_stream;
5624 		entry->seq = (uint16_t)control->mid;
5625 		entry->sz = control->length;
5626 		entry->flgs = control->sinfo_flags;
5627 	}
5628 #endif
5629 	if ((fromlen > 0) && (from != NULL)) {
5630 		union sctp_sockstore store;
5631 		size_t len;
5632 
5633 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5634 #ifdef INET6
5635 		case AF_INET6:
5636 			len = sizeof(struct sockaddr_in6);
5637 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5638 			store.sin6.sin6_port = control->port_from;
5639 			break;
5640 #endif
5641 #ifdef INET
5642 		case AF_INET:
5643 #ifdef INET6
5644 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5645 				len = sizeof(struct sockaddr_in6);
5646 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5647 				    &store.sin6);
5648 				store.sin6.sin6_port = control->port_from;
5649 			} else {
5650 				len = sizeof(struct sockaddr_in);
5651 				store.sin = control->whoFrom->ro._l_addr.sin;
5652 				store.sin.sin_port = control->port_from;
5653 			}
5654 #else
5655 			len = sizeof(struct sockaddr_in);
5656 			store.sin = control->whoFrom->ro._l_addr.sin;
5657 			store.sin.sin_port = control->port_from;
5658 #endif
5659 			break;
5660 #endif
5661 		default:
5662 			len = 0;
5663 			break;
5664 		}
5665 		memcpy(from, &store, min((size_t)fromlen, len));
5666 #ifdef INET6
5667 		{
5668 			struct sockaddr_in6 lsa6, *from6;
5669 
5670 			from6 = (struct sockaddr_in6 *)from;
5671 			sctp_recover_scope_mac(from6, (&lsa6));
5672 		}
5673 #endif
5674 	}
5675 	if (hold_rlock) {
5676 		SCTP_INP_READ_UNLOCK(inp);
5677 		hold_rlock = 0;
5678 	}
5679 	if (hold_sblock) {
5680 		SOCKBUF_UNLOCK(&so->so_rcv);
5681 		hold_sblock = 0;
5682 	}
5683 	/* now copy out what data we can */
5684 	if (mp == NULL) {
5685 		/* copy out each mbuf in the chain up to length */
5686 get_more_data:
5687 		m = control->data;
5688 		while (m) {
5689 			/* Move out all we can */
5690 			cp_len = (int)uio->uio_resid;
5691 			my_len = (int)SCTP_BUF_LEN(m);
5692 			if (cp_len > my_len) {
5693 				/* not enough in this buf */
5694 				cp_len = my_len;
5695 			}
5696 			if (hold_rlock) {
5697 				SCTP_INP_READ_UNLOCK(inp);
5698 				hold_rlock = 0;
5699 			}
5700 			if (cp_len > 0)
5701 				error = uiomove(mtod(m, char *), cp_len, uio);
5702 			/* re-read */
5703 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5704 				goto release;
5705 			}
5706 			if ((control->do_not_ref_stcb == 0) && stcb &&
5707 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5708 				no_rcv_needed = 1;
5709 			}
5710 			if (error) {
5711 				/* error we are out of here */
5712 				goto release;
5713 			}
5714 			SCTP_INP_READ_LOCK(inp);
5715 			hold_rlock = 1;
5716 			if (cp_len == SCTP_BUF_LEN(m)) {
5717 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5718 				    (control->end_added)) {
5719 					out_flags |= MSG_EOR;
5720 					if ((control->do_not_ref_stcb == 0) &&
5721 					    (control->stcb != NULL) &&
5722 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5723 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5724 				}
5725 				if (control->spec_flags & M_NOTIFICATION) {
5726 					out_flags |= MSG_NOTIFICATION;
5727 				}
5728 				/* we ate up the mbuf */
5729 				if (in_flags & MSG_PEEK) {
5730 					/* just looking */
5731 					m = SCTP_BUF_NEXT(m);
5732 					copied_so_far += cp_len;
5733 				} else {
5734 					/* dispose of the mbuf */
5735 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5736 						sctp_sblog(&so->so_rcv,
5737 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5738 					}
5739 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5740 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5741 						sctp_sblog(&so->so_rcv,
5742 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5743 					}
5744 					copied_so_far += cp_len;
5745 					freed_so_far += cp_len;
5746 					freed_so_far += MSIZE;
5747 					atomic_subtract_int(&control->length, cp_len);
5748 					control->data = sctp_m_free(m);
5749 					m = control->data;
5750 					/*
5751 					 * been through it all, must hold sb
5752 					 * lock ok to null tail
5753 					 */
5754 					if (control->data == NULL) {
5755 #ifdef INVARIANTS
5756 						if ((control->end_added == 0) ||
5757 						    (TAILQ_NEXT(control, next) == NULL)) {
5758 							/*
5759 							 * If the end is not
5760 							 * added, OR the
5761 							 * next is NOT null
5762 							 * we MUST have the
5763 							 * lock.
5764 							 */
5765 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5766 								panic("Hmm we don't own the lock?");
5767 							}
5768 						}
5769 #endif
5770 						control->tail_mbuf = NULL;
5771 #ifdef INVARIANTS
5772 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5773 							panic("end_added, nothing left and no MSG_EOR");
5774 						}
5775 #endif
5776 					}
5777 				}
5778 			} else {
5779 				/* Do we need to trim the mbuf? */
5780 				if (control->spec_flags & M_NOTIFICATION) {
5781 					out_flags |= MSG_NOTIFICATION;
5782 				}
5783 				if ((in_flags & MSG_PEEK) == 0) {
5784 					SCTP_BUF_RESV_UF(m, cp_len);
5785 					SCTP_BUF_LEN(m) -= cp_len;
5786 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5787 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5788 					}
5789 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5790 					if ((control->do_not_ref_stcb == 0) &&
5791 					    stcb) {
5792 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5793 					}
5794 					copied_so_far += cp_len;
5795 					freed_so_far += cp_len;
5796 					freed_so_far += MSIZE;
5797 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5798 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5799 						    SCTP_LOG_SBRESULT, 0);
5800 					}
5801 					atomic_subtract_int(&control->length, cp_len);
5802 				} else {
5803 					copied_so_far += cp_len;
5804 				}
5805 			}
5806 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5807 				break;
5808 			}
5809 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5810 			    (control->do_not_ref_stcb == 0) &&
5811 			    (freed_so_far >= rwnd_req)) {
5812 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5813 			}
5814 		}		/* end while(m) */
5815 		/*
5816 		 * At this point we have looked at it all and we either have
5817 		 * a MSG_EOR/or read all the user wants... <OR>
5818 		 * control->length == 0.
5819 		 */
5820 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5821 			/* we are done with this control */
5822 			if (control->length == 0) {
5823 				if (control->data) {
5824 #ifdef INVARIANTS
5825 					panic("control->data not null at read eor?");
5826 #else
5827 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5828 					sctp_m_freem(control->data);
5829 					control->data = NULL;
5830 #endif
5831 				}
5832 		done_with_control:
5833 				if (hold_rlock == 0) {
5834 					SCTP_INP_READ_LOCK(inp);
5835 					hold_rlock = 1;
5836 				}
5837 				TAILQ_REMOVE(&inp->read_queue, control, next);
5838 				/* Add back any hiddend data */
5839 				if (control->held_length) {
5840 					held_length = 0;
5841 					control->held_length = 0;
5842 					wakeup_read_socket = 1;
5843 				}
5844 				if (control->aux_data) {
5845 					sctp_m_free(control->aux_data);
5846 					control->aux_data = NULL;
5847 				}
5848 				no_rcv_needed = control->do_not_ref_stcb;
5849 				sctp_free_remote_addr(control->whoFrom);
5850 				control->data = NULL;
5851 #ifdef INVARIANTS
5852 				if (control->on_strm_q) {
5853 					panic("About to free ctl:%p so:%p and its in %d",
5854 					    control, so, control->on_strm_q);
5855 				}
5856 #endif
5857 				sctp_free_a_readq(stcb, control);
5858 				control = NULL;
5859 				if ((freed_so_far >= rwnd_req) &&
5860 				    (no_rcv_needed == 0))
5861 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5862 
5863 			} else {
5864 				/*
5865 				 * The user did not read all of this
5866 				 * message, turn off the returned MSG_EOR
5867 				 * since we are leaving more behind on the
5868 				 * control to read.
5869 				 */
5870 #ifdef INVARIANTS
5871 				if (control->end_added &&
5872 				    (control->data == NULL) &&
5873 				    (control->tail_mbuf == NULL)) {
5874 					panic("Gak, control->length is corrupt?");
5875 				}
5876 #endif
5877 				no_rcv_needed = control->do_not_ref_stcb;
5878 				out_flags &= ~MSG_EOR;
5879 			}
5880 		}
5881 		if (out_flags & MSG_EOR) {
5882 			goto release;
5883 		}
5884 		if ((uio->uio_resid == 0) ||
5885 		    ((in_eeor_mode) &&
5886 		    (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
5887 			goto release;
5888 		}
5889 		/*
5890 		 * If I hit here the receiver wants more and this message is
5891 		 * NOT done (pd-api). So two questions. Can we block? if not
5892 		 * we are done. Did the user NOT set MSG_WAITALL?
5893 		 */
5894 		if (block_allowed == 0) {
5895 			goto release;
5896 		}
5897 		/*
5898 		 * We need to wait for more data a few things: - We don't
5899 		 * sbunlock() so we don't get someone else reading. - We
5900 		 * must be sure to account for the case where what is added
5901 		 * is NOT to our control when we wakeup.
5902 		 */
5903 
5904 		/*
5905 		 * Do we need to tell the transport a rwnd update might be
5906 		 * needed before we go to sleep?
5907 		 */
5908 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5909 		    ((freed_so_far >= rwnd_req) &&
5910 		    (control->do_not_ref_stcb == 0) &&
5911 		    (no_rcv_needed == 0))) {
5912 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5913 		}
5914 wait_some_more:
5915 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5916 			goto release;
5917 		}
5918 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5919 			goto release;
5920 
5921 		if (hold_rlock == 1) {
5922 			SCTP_INP_READ_UNLOCK(inp);
5923 			hold_rlock = 0;
5924 		}
5925 		if (hold_sblock == 0) {
5926 			SOCKBUF_LOCK(&so->so_rcv);
5927 			hold_sblock = 1;
5928 		}
5929 		if ((copied_so_far) && (control->length == 0) &&
5930 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5931 			goto release;
5932 		}
5933 		if (so->so_rcv.sb_cc <= control->held_length) {
5934 			error = sbwait(&so->so_rcv);
5935 			if (error) {
5936 				goto release;
5937 			}
5938 			control->held_length = 0;
5939 		}
5940 		if (hold_sblock) {
5941 			SOCKBUF_UNLOCK(&so->so_rcv);
5942 			hold_sblock = 0;
5943 		}
5944 		if (control->length == 0) {
5945 			/* still nothing here */
5946 			if (control->end_added == 1) {
5947 				/* he aborted, or is done i.e.did a shutdown */
5948 				out_flags |= MSG_EOR;
5949 				if (control->pdapi_aborted) {
5950 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5951 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5952 
5953 					out_flags |= MSG_TRUNC;
5954 				} else {
5955 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5956 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5957 				}
5958 				goto done_with_control;
5959 			}
5960 			if (so->so_rcv.sb_cc > held_length) {
5961 				control->held_length = so->so_rcv.sb_cc;
5962 				held_length = 0;
5963 			}
5964 			goto wait_some_more;
5965 		} else if (control->data == NULL) {
5966 			/*
5967 			 * we must re-sync since data is probably being
5968 			 * added
5969 			 */
5970 			SCTP_INP_READ_LOCK(inp);
5971 			if ((control->length > 0) && (control->data == NULL)) {
5972 				/*
5973 				 * big trouble.. we have the lock and its
5974 				 * corrupt?
5975 				 */
5976 #ifdef INVARIANTS
5977 				panic("Impossible data==NULL length !=0");
5978 #endif
5979 				out_flags |= MSG_EOR;
5980 				out_flags |= MSG_TRUNC;
5981 				control->length = 0;
5982 				SCTP_INP_READ_UNLOCK(inp);
5983 				goto done_with_control;
5984 			}
5985 			SCTP_INP_READ_UNLOCK(inp);
5986 			/* We will fall around to get more data */
5987 		}
5988 		goto get_more_data;
5989 	} else {
5990 		/*-
5991 		 * Give caller back the mbuf chain,
5992 		 * store in uio_resid the length
5993 		 */
5994 		wakeup_read_socket = 0;
5995 		if ((control->end_added == 0) ||
5996 		    (TAILQ_NEXT(control, next) == NULL)) {
5997 			/* Need to get rlock */
5998 			if (hold_rlock == 0) {
5999 				SCTP_INP_READ_LOCK(inp);
6000 				hold_rlock = 1;
6001 			}
6002 		}
6003 		if (control->end_added) {
6004 			out_flags |= MSG_EOR;
6005 			if ((control->do_not_ref_stcb == 0) &&
6006 			    (control->stcb != NULL) &&
6007 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6008 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6009 		}
6010 		if (control->spec_flags & M_NOTIFICATION) {
6011 			out_flags |= MSG_NOTIFICATION;
6012 		}
6013 		uio->uio_resid = control->length;
6014 		*mp = control->data;
6015 		m = control->data;
6016 		while (m) {
6017 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6018 				sctp_sblog(&so->so_rcv,
6019 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6020 			}
6021 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6022 			freed_so_far += SCTP_BUF_LEN(m);
6023 			freed_so_far += MSIZE;
6024 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6025 				sctp_sblog(&so->so_rcv,
6026 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6027 			}
6028 			m = SCTP_BUF_NEXT(m);
6029 		}
6030 		control->data = control->tail_mbuf = NULL;
6031 		control->length = 0;
6032 		if (out_flags & MSG_EOR) {
6033 			/* Done with this control */
6034 			goto done_with_control;
6035 		}
6036 	}
6037 release:
6038 	if (hold_rlock == 1) {
6039 		SCTP_INP_READ_UNLOCK(inp);
6040 		hold_rlock = 0;
6041 	}
6042 	if (hold_sblock == 1) {
6043 		SOCKBUF_UNLOCK(&so->so_rcv);
6044 		hold_sblock = 0;
6045 	}
6046 	sbunlock(&so->so_rcv);
6047 	sockbuf_lock = 0;
6048 
6049 release_unlocked:
6050 	if (hold_sblock) {
6051 		SOCKBUF_UNLOCK(&so->so_rcv);
6052 		hold_sblock = 0;
6053 	}
6054 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6055 		if ((freed_so_far >= rwnd_req) &&
6056 		    (control && (control->do_not_ref_stcb == 0)) &&
6057 		    (no_rcv_needed == 0))
6058 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6059 	}
6060 out:
6061 	if (msg_flags) {
6062 		*msg_flags = out_flags;
6063 	}
6064 	if (((out_flags & MSG_EOR) == 0) &&
6065 	    ((in_flags & MSG_PEEK) == 0) &&
6066 	    (sinfo) &&
6067 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6068 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6069 		struct sctp_extrcvinfo *s_extra;
6070 
6071 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6072 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6073 	}
6074 	if (hold_rlock == 1) {
6075 		SCTP_INP_READ_UNLOCK(inp);
6076 	}
6077 	if (hold_sblock) {
6078 		SOCKBUF_UNLOCK(&so->so_rcv);
6079 	}
6080 	if (sockbuf_lock) {
6081 		sbunlock(&so->so_rcv);
6082 	}
6083 	if (freecnt_applied) {
6084 		/*
6085 		 * The lock on the socket buffer protects us so the free
6086 		 * code will stop. But since we used the socketbuf lock and
6087 		 * the sender uses the tcb_lock to increment, we need to use
6088 		 * the atomic add to the refcnt.
6089 		 */
6090 		if (stcb == NULL) {
6091 #ifdef INVARIANTS
6092 			panic("stcb for refcnt has gone NULL?");
6093 			goto stage_left;
6094 #else
6095 			goto stage_left;
6096 #endif
6097 		}
6098 		/* Save the value back for next time */
6099 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6100 		atomic_add_int(&stcb->asoc.refcnt, -1);
6101 	}
6102 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6103 		if (stcb) {
6104 			sctp_misc_ints(SCTP_SORECV_DONE,
6105 			    freed_so_far,
6106 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6107 			    stcb->asoc.my_rwnd,
6108 			    so->so_rcv.sb_cc);
6109 		} else {
6110 			sctp_misc_ints(SCTP_SORECV_DONE,
6111 			    freed_so_far,
6112 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6113 			    0,
6114 			    so->so_rcv.sb_cc);
6115 		}
6116 	}
6117 stage_left:
6118 	if (wakeup_read_socket) {
6119 		sctp_sorwakeup(inp, so);
6120 	}
6121 	return (error);
6122 }
6123 
6124 
6125 #ifdef SCTP_MBUF_LOGGING
6126 struct mbuf *
6127 sctp_m_free(struct mbuf *m)
6128 {
6129 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6130 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6131 	}
6132 	return (m_free(m));
6133 }
6134 
6135 void
6136 sctp_m_freem(struct mbuf *mb)
6137 {
6138 	while (mb != NULL)
6139 		mb = sctp_m_free(mb);
6140 }
6141 
6142 #endif
6143 
/*
 * Request a peer-set-primary for the given local address: look up the
 * sctp_ifa for the address, queue a SCTP_SET_PRIM_ADDR work item on the
 * global address work queue, and kick the ADDR_WQ timer so the iterator
 * processes it.  Returns 0 on success, or EADDRNOTAVAIL / ENOMEM.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		/* Address is not known locally in this VRF. */
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		/* Zone allocation failed; nothing has been queued. */
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now increment the count and initialize the wi structure */
	SCTP_INCR_LADDR_COUNT();
	memset(wi, 0, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* The work item holds a reference on the ifa until processed. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_WQ_ADDR_UNLOCK();
	/* Wake the address work-queue timer to consume the new entry. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}
6190 
6191 
/*
 * Protocol-specific soreceive() entry point for SCTP sockets.
 * Translates the generic socket receive arguments into a call to
 * sctp_sorecvmsg(), then converts the returned sctp_extrcvinfo into a
 * control-message chain (*controlp) and duplicates the peer address
 * into *psa for the caller.  Returns 0 or an errno value.
 */
int
sctp_soreceive(struct socket *so,
    struct sockaddr **psa,
    struct uio *uio,
    struct mbuf **mp0,
    struct mbuf **controlp,
    int *flagsp)
{
	int error, fromlen;
	/*
	 * Scratch space for the peer address; cast to struct sockaddr
	 * below.  NOTE(review): a plain uint8_t array carries no
	 * alignment guarantee for the sockaddr cast — presumably fine on
	 * supported platforms, but worth confirming.
	 */
	uint8_t sockbuf[256];
	struct sockaddr *from;
	struct sctp_extrcvinfo sinfo;
	int filling_sinfo = 1;	/* collect rcvinfo unless the user opted out */
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	/* pickup the assoc we are reading from */
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
	    (controlp == NULL)) {
		/* user does not want the sndrcv ctl */
		filling_sinfo = 0;
	}
	if (psa) {
		from = (struct sockaddr *)sockbuf;
		fromlen = sizeof(sockbuf);
		/* zero length marks "no address filled in yet" */
		from->sa_len = 0;
	} else {
		from = NULL;
		fromlen = 0;
	}

	if (filling_sinfo) {
		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
	}
	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
	if (controlp != NULL) {
		/* copy back the sinfo in a CMSG format */
		if (filling_sinfo)
			*controlp = sctp_build_ctl_nchunk(inp,
			    (struct sctp_sndrcvinfo *)&sinfo);
		else
			*controlp = NULL;
	}
	if (psa) {
		/* copy back the address info, malloc'ed for the caller */
		if (from && from->sa_len) {
			*psa = sodupsockaddr(from, M_NOWAIT);
		} else {
			*psa = NULL;
		}
	}
	return (error);
}
6252 
6253 
6254 
6255 
6256 
/*
 * Add the remaining addresses of a connectx() address list to an
 * existing association.  'addr' points at 'totaddr' packed sockaddrs.
 * Each valid address is added as a confirmed remote address; on any
 * invalid address or allocation failure the association is freed
 * (caller must not touch stcb afterwards), *error is set, and the
 * count of addresses added so far is returned.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject wildcard, broadcast and multicast peers. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				/* The association is torn down on error. */
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast peers. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): unknown families leave 'incr' at its
			 * previous value (0 on the first iteration), so 'sa'
			 * may not advance — presumably the caller has already
			 * validated the list via sctp_connectx_helper_find();
			 * confirm before relying on this path.
			 */
			break;
		}
		/* Step to the next packed sockaddr in the list. */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6337 
/*
 * Validate and count a packed connectx() address list.  On success
 * returns NULL with *num_v4/*num_v6 holding the per-family counts and
 * *totaddr possibly trimmed (at an unknown family or the 'limit' byte
 * bound).  If any address already belongs to an association of this
 * endpoint, that association is returned instead (with the inp
 * reference taken by sctp_findassociation_ep_addr() still held).  On a
 * malformed address, *error and *bad_addr are set and NULL is returned.
 */
struct sctp_tcb *
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    unsigned int *totaddr,
    unsigned int *num_v4, unsigned int *num_v6, int *error,
    unsigned int limit, int *bad_addr)
{
	struct sockaddr *sa;
	struct sctp_tcb *stcb = NULL;
	unsigned int incr, at, i;

	at = 0;
	sa = addr;
	*error = *num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	for (i = 0; i < *totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = (unsigned int)sizeof(struct sockaddr_in);
			/* sa_len must match the family's sockaddr size. */
			if (sa->sa_len != incr) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
			(*num_v4) += 1;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			{
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)sa;
				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
					/* Must be non-mapped for connectx */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				incr = (unsigned int)sizeof(struct sockaddr_in6);
				if (sa->sa_len != incr) {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				(*num_v6) += 1;
				break;
			}
#endif
		default:
			/* Unknown family: truncate the list here. */
			*totaddr = i;
			incr = 0;
			/* we are done */
			break;
		}
		if (i == *totaddr) {
			/* List was truncated by the default case above. */
			break;
		}
		/* Hold the inp across the association lookup. */
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/* Already have or am bring up an association */
			return (stcb);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		/* Stop before walking past the user-supplied buffer. */
		if ((at + incr) > limit) {
			*totaddr = i;
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return ((struct sctp_tcb *)NULL);
}
6415 
6416 /*
6417  * sctp_bindx(ADD) for one address.
6418  * assumes all arguments are valid/checked by caller.
6419  */
/*
 * Add one address to the endpoint's bound-address list (sctp_bindx ADD).
 * Validates family/length against the socket type, converts v4-mapped
 * v6 addresses to plain v4 where permitted, binds the endpoint if it is
 * still unbound, and otherwise adds the address via sctp_addr_mgmt_ep_sa().
 * Result is reported through *error (0 on success).
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Work with the plain v4 form from here on. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* Endpoint not yet bound: this becomes the primary bind. */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* Is some other endpoint already bound to this address? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Free: clear the port and add the address. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* Another endpoint owns this address/port. */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6545 
6546 /*
6547  * sctp_bindx(DELETE) for one address.
6548  * assumes all arguments are valid/checked by caller.
6549  */
/*
 * Remove one address from the endpoint's bound-address list
 * (sctp_bindx DELETE).  Performs the same family/length validation as
 * the ADD path, converts v4-mapped v6 addresses where permitted, then
 * deletes the address via sctp_addr_mgmt_ep_sa().  Result is reported
 * through *error (0 on success).
 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;
#endif

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Work with the plain v4 form from here on. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6632 
6633 /*
6634  * returns the valid local address count for an assoc, taking into account
6635  * all scoping rules
6636  */
/*
 * Count the local addresses usable by this association, applying its
 * scope rules (loopback, IPv4 private, IPv6 link/site local) and jail
 * (prison) restrictions.  Walks either all VRF interfaces (bound-all
 * endpoint) or the endpoint's explicit address list, under the global
 * address read lock.
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope;
#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;
#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;
#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				/* loopback addresses are out of scope */
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* honor jail restrictions */
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						/* honor jail restrictions */
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad
									 * link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6774 
6775 #if defined(SCTP_LOCAL_TRACE_BUF)
6776 
6777 void
6778 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6779 {
6780 	uint32_t saveindex, newindex;
6781 
6782 	do {
6783 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6784 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6785 			newindex = 1;
6786 		} else {
6787 			newindex = saveindex + 1;
6788 		}
6789 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6790 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6791 		saveindex = 0;
6792 	}
6793 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6794 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6795 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6796 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6797 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6798 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6799 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6800 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6801 }
6802 
6803 #endif
/*
 * Receive callback for SCTP-over-UDP tunneled packets.  'm' holds an
 * IP packet whose payload (starting at 'off') is a UDP datagram
 * carrying SCTP.  The UDP header is stripped, the IP length fields are
 * adjusted, and the packet is re-injected into the normal SCTP input
 * path with the UDP source port recorded.  The mbuf is consumed on all
 * paths (freed on error, handed to sctp*_input_with_port on success).
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	struct ip *iph;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed; m_pullup already freed sp */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain: append sp after the IP header */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	/*
	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
	 * checksum and it was valid. Since CSUM_DATA_VALID ==
	 * CSUM_SCTP_VALID this would imply that the HW also verified the
	 * SCTP checksum. Therefore, clear the bit.
	 */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* Shrink the IP total length by the stripped UDP header. */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		/* Shrink the IPv6 payload length likewise. */
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6886 
6887 #ifdef INET
/*
 * ICMP error handler for UDP-encapsulated SCTP (IPv4), installed via
 * udp_set_kernel_tunneling().  'vip' points at the copy of the inner
 * (offending) IP header quoted in the ICMP payload.  The ports and the
 * verification tag of the quoted SCTP packet are validated before the
 * error is forwarded to sctp_notify(), to protect against blindly
 * spoofed ICMP messages.
 */
static void
sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
{
	struct ip *outer_ip, *inner_ip;
	struct sctphdr *sh;
	struct icmp *icmp;
	struct udphdr *udp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_init_chunk *ch;
	struct sockaddr_in src, dst;
	uint8_t type, code;

	inner_ip = (struct ip *)vip;
	/*
	 * The inner IP header lives inside the ICMP message; step
	 * backwards from it to recover the ICMP header and the outer IP
	 * header surrounding it.
	 */
	icmp = (struct icmp *)((caddr_t)inner_ip -
	    (sizeof(struct icmp) - sizeof(struct ip)));
	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
	/*
	 * The ICMP message must quote at least the inner IP header, the
	 * UDP header and the first 8 bytes of the SCTP common header
	 * (ports and verification tag).
	 */
	if (ntohs(outer_ip->ip_len) <
	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
		return;
	}
	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
	sh = (struct sctphdr *)(udp + 1);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
	src.sin_len = sizeof(struct sockaddr_in);
	src.sin_port = sh->src_port;
	src.sin_addr = inner_ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(struct sockaddr_in);
	dst.sin_port = sh->dest_port;
	dst.sin_addr = inner_ip->ip_dst;
	/*
	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
	 * holds our local endpoint address. Thus we reverse the dst and the
	 * src in the lookup.
	 */
	inp = NULL;
	net = NULL;
	/*
	 * On success stcb is returned locked; on failure with inp set, the
	 * inp reference count has been bumped (see the decrement below).
	 */
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1,
	    SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp->uh_dport != net->port) ||
		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh->v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			/* Zero tag: only an INIT chunk may legally carry it. */
			if (ntohs(outer_ip->ip_len) >=
			    sizeof(struct ip) +
			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				ch = (struct sctp_init_chunk *)(sh + 1);
				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = icmp->icmp_type;
		code = icmp->icmp_code;
		/*
		 * A "port unreachable" for the encapsulating UDP port means
		 * the peer has no SCTP/UDP listener; report it as
		 * "protocol unreachable" to the SCTP notification code.
		 */
		if ((type == ICMP_UNREACH) &&
		    (code == ICMP_UNREACH_PORT)) {
			code = ICMP_UNREACH_PROTOCOL;
		}
		sctp_notify(inp, stcb, net, type, code,
		    ntohs(inner_ip->ip_len),
		    (uint32_t)ntohs(icmp->icmp_nextmtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
	return;
}
6995 #endif
6996 
6997 #ifdef INET6
/*
 * ICMPv6 error handler for UDP-encapsulated SCTP (IPv6), installed via
 * udp_set_kernel_tunneling().  'd' is a struct ip6ctlparam describing
 * the ICMPv6 message and the quoted (offending) packet.  The quoted
 * UDP/SCTP headers are copied out with m_copydata() (they need not be
 * contiguous), the ports and verification tag are validated against the
 * association, and the error is then forwarded to sctp6_notify().
 */
static void
sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
{
	struct ip6ctlparam *ip6cp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctphdr sh;
	struct udphdr udp;
	struct sockaddr_in6 src, dst;
	uint8_t type, code;

	ip6cp = (struct ip6ctlparam *)d;
	/*
	 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
	 */
	if (ip6cp->ip6c_m == NULL) {
		return;
	}
	/*
	 * Check if we can safely examine the ports and the verification tag
	 * of the SCTP common header.
	 */
	if (ip6cp->ip6c_m->m_pkthdr.len <
	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
		return;
	}
	/* Copy out the UDP header. */
	memset(&udp, 0, sizeof(struct udphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off,
	    sizeof(struct udphdr),
	    (caddr_t)&udp);
	/* Copy out the port numbers and the verification tag. */
	memset(&sh, 0, sizeof(struct sctphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off + sizeof(struct udphdr),
	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
	    (caddr_t)&sh);
	memset(&src, 0, sizeof(struct sockaddr_in6));
	src.sin6_family = AF_INET6;
	src.sin6_len = sizeof(struct sockaddr_in6);
	src.sin6_port = sh.src_port;
	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
	/* Embed the zone for scoped addresses; bail if that fails. */
	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	memset(&dst, 0, sizeof(struct sockaddr_in6));
	dst.sin6_family = AF_INET6;
	dst.sin6_len = sizeof(struct sockaddr_in6);
	dst.sin6_port = sh.dest_port;
	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	inp = NULL;
	net = NULL;
	/*
	 * 'dst'/'src' are swapped in the lookup: the quoted packet was sent
	 * by us, so its destination is the peer and its source is our
	 * local endpoint.  On success stcb is returned locked.
	 */
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp.uh_dport != net->port) ||
		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh.v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			/* Zero tag: only an INIT chunk may legally carry it. */
			if (ip6cp->ip6c_m->m_pkthdr.len >=
			    ip6cp->ip6c_off + sizeof(struct udphdr) +
			    sizeof(struct sctphdr) +
			    sizeof(struct sctp_chunkhdr) +
			    offsetof(struct sctp_init, a_rwnd)) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				uint32_t initiate_tag;
				uint8_t chunk_type;

				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr),
				    sizeof(uint8_t),
				    (caddr_t)&chunk_type);
				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr) +
				    sizeof(struct sctp_chunkhdr),
				    sizeof(uint32_t),
				    (caddr_t)&initiate_tag);
				if ((chunk_type != SCTP_INITIATION) ||
				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = ip6cp->ip6c_icmp6->icmp6_type;
		code = ip6cp->ip6c_icmp6->icmp6_code;
		/*
		 * A "port unreachable" for the encapsulating UDP port means
		 * the peer has no SCTP/UDP listener; map it to a parameter
		 * problem (bad next header) for the notification code.
		 */
		if ((type == ICMP6_DST_UNREACH) &&
		    (code == ICMP6_DST_UNREACH_NOPORT)) {
			type = ICMP6_PARAM_PROB;
			code = ICMP6_PARAMPROB_NEXTHEADER;
		}
		sctp6_notify(inp, stcb, net, type, code,
		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce inp's ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
}
7136 #endif
7137 
/*
 * Tear down SCTP-over-UDP tunneling: close the IPv4 and/or IPv6
 * tunneling sockets, if open, and clear the global pointers so
 * sctp_over_udp_start() can be called again.  Safe to call when
 * tunneling was never started (both pointers NULL).
 */
void
sctp_over_udp_stop(void)
{
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writting!
	 */
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
7158 
/*
 * Start SCTP-over-UDP tunneling: create a kernel UDP socket per enabled
 * address family, register the packet/ICMP tunneling callbacks on it,
 * and bind it to the configured sctp_udp_tunneling_port.  On any
 * failure, already-created sockets are closed via sctp_over_udp_stop()
 * and the errno-style error is returned; returns 0 on success.
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writting!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}
7245 
7246 /*
7247  * sctp_min_mtu ()returns the minimum of all non-zero arguments.
7248  * If all arguments are zero, zero is returned.
7249  */
7250 uint32_t
7251 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7252 {
7253 	if (mtu1 > 0) {
7254 		if (mtu2 > 0) {
7255 			if (mtu3 > 0) {
7256 				return (min(mtu1, min(mtu2, mtu3)));
7257 			} else {
7258 				return (min(mtu1, mtu2));
7259 			}
7260 		} else {
7261 			if (mtu3 > 0) {
7262 				return (min(mtu1, mtu3));
7263 			} else {
7264 				return (mtu1);
7265 			}
7266 		}
7267 	} else {
7268 		if (mtu2 > 0) {
7269 			if (mtu3 > 0) {
7270 				return (min(mtu2, mtu3));
7271 			} else {
7272 				return (mtu2);
7273 			}
7274 		} else {
7275 			return (mtu3);
7276 		}
7277 	}
7278 }
7279 
7280 void
7281 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7282 {
7283 	struct in_conninfo inc;
7284 
7285 	memset(&inc, 0, sizeof(struct in_conninfo));
7286 	inc.inc_fibnum = fibnum;
7287 	switch (addr->sa.sa_family) {
7288 #ifdef INET
7289 	case AF_INET:
7290 		inc.inc_faddr = addr->sin.sin_addr;
7291 		break;
7292 #endif
7293 #ifdef INET6
7294 	case AF_INET6:
7295 		inc.inc_flags |= INC_ISIPV6;
7296 		inc.inc6_faddr = addr->sin6.sin6_addr;
7297 		break;
7298 #endif
7299 	default:
7300 		return;
7301 	}
7302 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7303 }
7304 
7305 uint32_t
7306 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7307 {
7308 	struct in_conninfo inc;
7309 
7310 	memset(&inc, 0, sizeof(struct in_conninfo));
7311 	inc.inc_fibnum = fibnum;
7312 	switch (addr->sa.sa_family) {
7313 #ifdef INET
7314 	case AF_INET:
7315 		inc.inc_faddr = addr->sin.sin_addr;
7316 		break;
7317 #endif
7318 #ifdef INET6
7319 	case AF_INET6:
7320 		inc.inc_flags |= INC_ISIPV6;
7321 		inc.inc6_faddr = addr->sin6.sin6_addr;
7322 		break;
7323 #endif
7324 	default:
7325 		return (0);
7326 	}
7327 	return ((uint32_t)tcp_hc_getmtu(&inc));
7328 }
7329