xref: /freebsd/sys/netinet/sctputil.c (revision 7f9dff23d3092aa33ad45b2b63e52469b3c13a6e)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>	/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55 #ifdef INET6
56 #include <netinet/icmp6.h>
57 #endif
58 
59 
60 #ifndef KTR_SCTP
61 #define KTR_SCTP KTR_SUBSYS
62 #endif
63 
64 extern const struct sctp_cc_functions sctp_cc_functions[];
65 extern const struct sctp_ss_functions sctp_ss_functions[];
66 
67 void
68 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
69 {
70 	struct sctp_cwnd_log sctp_clog;
71 
72 	sctp_clog.x.sb.stcb = stcb;
73 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
74 	if (stcb)
75 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
76 	else
77 		sctp_clog.x.sb.stcb_sbcc = 0;
78 	sctp_clog.x.sb.incr = incr;
79 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
80 	    SCTP_LOG_EVENT_SB,
81 	    from,
82 	    sctp_clog.x.misc.log1,
83 	    sctp_clog.x.misc.log2,
84 	    sctp_clog.x.misc.log3,
85 	    sctp_clog.x.misc.log4);
86 }
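
/*
 * A note on the sctp_*log helpers in this file: each one fills in a single
 * arm of the union inside struct sctp_cwnd_log (x.sb, x.close, x.rto, ...)
 * and then hands x.misc.log1..log4 to SCTP_CTR6.  The misc arm overlays the
 * same storage, so the four log words that get traced are simply a raw view
 * of whatever the type-specific arm just wrote (assuming, as elsewhere in
 * this file, that x is a union covering all of the per-event layouts).
 */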
87 
88 void
89 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
90 {
91 	struct sctp_cwnd_log sctp_clog;
92 
93 	sctp_clog.x.close.inp = (void *)inp;
94 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
95 	if (stcb) {
96 		sctp_clog.x.close.stcb = (void *)stcb;
97 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
98 	} else {
99 		sctp_clog.x.close.stcb = 0;
100 		sctp_clog.x.close.state = 0;
101 	}
102 	sctp_clog.x.close.loc = loc;
103 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
104 	    SCTP_LOG_EVENT_CLOSE,
105 	    0,
106 	    sctp_clog.x.misc.log1,
107 	    sctp_clog.x.misc.log2,
108 	    sctp_clog.x.misc.log3,
109 	    sctp_clog.x.misc.log4);
110 }
111 
112 void
113 rto_logging(struct sctp_nets *net, int from)
114 {
115 	struct sctp_cwnd_log sctp_clog;
116 
117 	memset(&sctp_clog, 0, sizeof(sctp_clog));
118 	sctp_clog.x.rto.net = (void *)net;
119 	sctp_clog.x.rto.rtt = net->rtt / 1000;
120 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
121 	    SCTP_LOG_EVENT_RTT,
122 	    from,
123 	    sctp_clog.x.misc.log1,
124 	    sctp_clog.x.misc.log2,
125 	    sctp_clog.x.misc.log3,
126 	    sctp_clog.x.misc.log4);
127 }
128 
129 void
130 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
131 {
132 	struct sctp_cwnd_log sctp_clog;
133 
134 	sctp_clog.x.strlog.stcb = stcb;
135 	sctp_clog.x.strlog.n_tsn = tsn;
136 	sctp_clog.x.strlog.n_sseq = sseq;
137 	sctp_clog.x.strlog.e_tsn = 0;
138 	sctp_clog.x.strlog.e_sseq = 0;
139 	sctp_clog.x.strlog.strm = stream;
140 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
141 	    SCTP_LOG_EVENT_STRM,
142 	    from,
143 	    sctp_clog.x.misc.log1,
144 	    sctp_clog.x.misc.log2,
145 	    sctp_clog.x.misc.log3,
146 	    sctp_clog.x.misc.log4);
147 }
148 
149 void
150 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
151 {
152 	struct sctp_cwnd_log sctp_clog;
153 
154 	sctp_clog.x.nagle.stcb = (void *)stcb;
155 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
156 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
157 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
158 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
159 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
160 	    SCTP_LOG_EVENT_NAGLE,
161 	    action,
162 	    sctp_clog.x.misc.log1,
163 	    sctp_clog.x.misc.log2,
164 	    sctp_clog.x.misc.log3,
165 	    sctp_clog.x.misc.log4);
166 }
167 
168 void
169 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
170 {
171 	struct sctp_cwnd_log sctp_clog;
172 
173 	sctp_clog.x.sack.cumack = cumack;
174 	sctp_clog.x.sack.oldcumack = old_cumack;
175 	sctp_clog.x.sack.tsn = tsn;
176 	sctp_clog.x.sack.numGaps = gaps;
177 	sctp_clog.x.sack.numDups = dups;
178 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
179 	    SCTP_LOG_EVENT_SACK,
180 	    from,
181 	    sctp_clog.x.misc.log1,
182 	    sctp_clog.x.misc.log2,
183 	    sctp_clog.x.misc.log3,
184 	    sctp_clog.x.misc.log4);
185 }
186 
187 void
188 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
189 {
190 	struct sctp_cwnd_log sctp_clog;
191 
192 	memset(&sctp_clog, 0, sizeof(sctp_clog));
193 	sctp_clog.x.map.base = map;
194 	sctp_clog.x.map.cum = cum;
195 	sctp_clog.x.map.high = high;
196 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
197 	    SCTP_LOG_EVENT_MAP,
198 	    from,
199 	    sctp_clog.x.misc.log1,
200 	    sctp_clog.x.misc.log2,
201 	    sctp_clog.x.misc.log3,
202 	    sctp_clog.x.misc.log4);
203 }
204 
205 void
206 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
207 {
208 	struct sctp_cwnd_log sctp_clog;
209 
210 	memset(&sctp_clog, 0, sizeof(sctp_clog));
211 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
212 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
213 	sctp_clog.x.fr.tsn = tsn;
214 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
215 	    SCTP_LOG_EVENT_FR,
216 	    from,
217 	    sctp_clog.x.misc.log1,
218 	    sctp_clog.x.misc.log2,
219 	    sctp_clog.x.misc.log3,
220 	    sctp_clog.x.misc.log4);
221 }
222 
223 #ifdef SCTP_MBUF_LOGGING
224 void
225 sctp_log_mb(struct mbuf *m, int from)
226 {
227 	struct sctp_cwnd_log sctp_clog;
228 
229 	sctp_clog.x.mb.mp = m;
230 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
231 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
232 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
233 	if (SCTP_BUF_IS_EXTENDED(m)) {
234 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
235 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
236 	} else {
237 		sctp_clog.x.mb.ext = 0;
238 		sctp_clog.x.mb.refcnt = 0;
239 	}
240 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
241 	    SCTP_LOG_EVENT_MBUF,
242 	    from,
243 	    sctp_clog.x.misc.log1,
244 	    sctp_clog.x.misc.log2,
245 	    sctp_clog.x.misc.log3,
246 	    sctp_clog.x.misc.log4);
247 }
248 
249 void
250 sctp_log_mbc(struct mbuf *m, int from)
251 {
252 	struct mbuf *mat;
253 
254 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
255 		sctp_log_mb(mat, from);
256 	}
257 }
258 #endif
259 
260 void
261 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
262 {
263 	struct sctp_cwnd_log sctp_clog;
264 
265 	if (control == NULL) {
266 		SCTP_PRINTF("Gak log of NULL?\n");
267 		return;
268 	}
269 	sctp_clog.x.strlog.stcb = control->stcb;
270 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
271 	sctp_clog.x.strlog.n_sseq = (uint16_t) control->mid;
272 	sctp_clog.x.strlog.strm = control->sinfo_stream;
273 	if (poschk != NULL) {
274 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
275 		sctp_clog.x.strlog.e_sseq = (uint16_t) poschk->mid;
276 	} else {
277 		sctp_clog.x.strlog.e_tsn = 0;
278 		sctp_clog.x.strlog.e_sseq = 0;
279 	}
280 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
281 	    SCTP_LOG_EVENT_STRM,
282 	    from,
283 	    sctp_clog.x.misc.log1,
284 	    sctp_clog.x.misc.log2,
285 	    sctp_clog.x.misc.log3,
286 	    sctp_clog.x.misc.log4);
287 }
288 
289 void
290 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
291 {
292 	struct sctp_cwnd_log sctp_clog;
293 
294 	sctp_clog.x.cwnd.net = net;
295 	if (stcb->asoc.send_queue_cnt > 255)
296 		sctp_clog.x.cwnd.cnt_in_send = 255;
297 	else
298 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
299 	if (stcb->asoc.stream_queue_cnt > 255)
300 		sctp_clog.x.cwnd.cnt_in_str = 255;
301 	else
302 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
303 
304 	if (net) {
305 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
306 		sctp_clog.x.cwnd.inflight = net->flight_size;
307 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
308 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
309 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
310 	}
311 	if (SCTP_CWNDLOG_PRESEND == from) {
312 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
313 	}
314 	sctp_clog.x.cwnd.cwnd_augment = augment;
315 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
316 	    SCTP_LOG_EVENT_CWND,
317 	    from,
318 	    sctp_clog.x.misc.log1,
319 	    sctp_clog.x.misc.log2,
320 	    sctp_clog.x.misc.log3,
321 	    sctp_clog.x.misc.log4);
322 }
323 
324 void
325 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
326 {
327 	struct sctp_cwnd_log sctp_clog;
328 
329 	memset(&sctp_clog, 0, sizeof(sctp_clog));
330 	if (inp) {
331 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
332 
333 	} else {
334 		sctp_clog.x.lock.sock = (void *)NULL;
335 	}
336 	sctp_clog.x.lock.inp = (void *)inp;
337 	if (stcb) {
338 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
339 	} else {
340 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
341 	}
342 	if (inp) {
343 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
344 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
345 	} else {
346 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
347 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
348 	}
349 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
350 	if (inp && (inp->sctp_socket)) {
351 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
352 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
353 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
354 	} else {
355 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
356 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
357 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
358 	}
359 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
360 	    SCTP_LOG_LOCK_EVENT,
361 	    from,
362 	    sctp_clog.x.misc.log1,
363 	    sctp_clog.x.misc.log2,
364 	    sctp_clog.x.misc.log3,
365 	    sctp_clog.x.misc.log4);
366 }
367 
368 void
369 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
370 {
371 	struct sctp_cwnd_log sctp_clog;
372 
373 	memset(&sctp_clog, 0, sizeof(sctp_clog));
374 	sctp_clog.x.cwnd.net = net;
375 	sctp_clog.x.cwnd.cwnd_new_value = error;
376 	sctp_clog.x.cwnd.inflight = net->flight_size;
377 	sctp_clog.x.cwnd.cwnd_augment = burst;
378 	if (stcb->asoc.send_queue_cnt > 255)
379 		sctp_clog.x.cwnd.cnt_in_send = 255;
380 	else
381 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
382 	if (stcb->asoc.stream_queue_cnt > 255)
383 		sctp_clog.x.cwnd.cnt_in_str = 255;
384 	else
385 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
386 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
387 	    SCTP_LOG_EVENT_MAXBURST,
388 	    from,
389 	    sctp_clog.x.misc.log1,
390 	    sctp_clog.x.misc.log2,
391 	    sctp_clog.x.misc.log3,
392 	    sctp_clog.x.misc.log4);
393 }
394 
395 void
396 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
397 {
398 	struct sctp_cwnd_log sctp_clog;
399 
400 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
401 	sctp_clog.x.rwnd.send_size = snd_size;
402 	sctp_clog.x.rwnd.overhead = overhead;
403 	sctp_clog.x.rwnd.new_rwnd = 0;
404 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
405 	    SCTP_LOG_EVENT_RWND,
406 	    from,
407 	    sctp_clog.x.misc.log1,
408 	    sctp_clog.x.misc.log2,
409 	    sctp_clog.x.misc.log3,
410 	    sctp_clog.x.misc.log4);
411 }
412 
413 void
414 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
415 {
416 	struct sctp_cwnd_log sctp_clog;
417 
418 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
419 	sctp_clog.x.rwnd.send_size = flight_size;
420 	sctp_clog.x.rwnd.overhead = overhead;
421 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
422 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
423 	    SCTP_LOG_EVENT_RWND,
424 	    from,
425 	    sctp_clog.x.misc.log1,
426 	    sctp_clog.x.misc.log2,
427 	    sctp_clog.x.misc.log3,
428 	    sctp_clog.x.misc.log4);
429 }
430 
431 #ifdef SCTP_MBCNT_LOGGING
432 static void
433 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
434 {
435 	struct sctp_cwnd_log sctp_clog;
436 
437 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
438 	sctp_clog.x.mbcnt.size_change = book;
439 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
440 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
441 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
442 	    SCTP_LOG_EVENT_MBCNT,
443 	    from,
444 	    sctp_clog.x.misc.log1,
445 	    sctp_clog.x.misc.log2,
446 	    sctp_clog.x.misc.log3,
447 	    sctp_clog.x.misc.log4);
448 }
449 #endif
450 
451 void
452 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
453 {
454 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
455 	    SCTP_LOG_MISC_EVENT,
456 	    from,
457 	    a, b, c, d);
458 }
459 
460 void
461 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
462 {
463 	struct sctp_cwnd_log sctp_clog;
464 
465 	sctp_clog.x.wake.stcb = (void *)stcb;
466 	sctp_clog.x.wake.wake_cnt = wake_cnt;
467 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
468 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
469 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
470 
471 	if (stcb->asoc.stream_queue_cnt < 0xff)
472 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
473 	else
474 		sctp_clog.x.wake.stream_qcnt = 0xff;
475 
476 	if (stcb->asoc.chunks_on_out_queue < 0xff)
477 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
478 	else
479 		sctp_clog.x.wake.chunks_on_oque = 0xff;
480 
481 	sctp_clog.x.wake.sctpflags = 0;
482 	/* set in the deferred mode stuff */
483 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
484 		sctp_clog.x.wake.sctpflags |= 1;
485 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
486 		sctp_clog.x.wake.sctpflags |= 2;
487 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
488 		sctp_clog.x.wake.sctpflags |= 4;
489 	/* what about the sb */
490 	if (stcb->sctp_socket) {
491 		struct socket *so = stcb->sctp_socket;
492 
493 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
494 	} else {
495 		sctp_clog.x.wake.sbflags = 0xff;
496 	}
497 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
498 	    SCTP_LOG_EVENT_WAKE,
499 	    from,
500 	    sctp_clog.x.misc.log1,
501 	    sctp_clog.x.misc.log2,
502 	    sctp_clog.x.misc.log3,
503 	    sctp_clog.x.misc.log4);
504 }
505 
506 void
507 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
508 {
509 	struct sctp_cwnd_log sctp_clog;
510 
511 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
512 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
513 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
514 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
515 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
516 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
517 	sctp_clog.x.blk.sndlen = (uint32_t) sendlen;
518 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
519 	    SCTP_LOG_EVENT_BLOCK,
520 	    from,
521 	    sctp_clog.x.misc.log1,
522 	    sctp_clog.x.misc.log2,
523 	    sctp_clog.x.misc.log3,
524 	    sctp_clog.x.misc.log4);
525 }
526 
527 int
528 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
529 {
530 	/* May need to fix this if ktrdump does not work */
531 	return (0);
532 }
533 
534 #ifdef SCTP_AUDITING_ENABLED
535 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
536 static int sctp_audit_indx = 0;
537 
538 static
539 void
540 sctp_print_audit_report(void)
541 {
542 	int i;
543 	int cnt;
544 
545 	cnt = 0;
546 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
547 		if ((sctp_audit_data[i][0] == 0xe0) &&
548 		    (sctp_audit_data[i][1] == 0x01)) {
549 			cnt = 0;
550 			SCTP_PRINTF("\n");
551 		} else if (sctp_audit_data[i][0] == 0xf0) {
552 			cnt = 0;
553 			SCTP_PRINTF("\n");
554 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
555 		    (sctp_audit_data[i][1] == 0x01)) {
556 			SCTP_PRINTF("\n");
557 			cnt = 0;
558 		}
559 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
560 		    (uint32_t) sctp_audit_data[i][1]);
561 		cnt++;
562 		if ((cnt % 14) == 0)
563 			SCTP_PRINTF("\n");
564 	}
565 	for (i = 0; i < sctp_audit_indx; i++) {
566 		if ((sctp_audit_data[i][0] == 0xe0) &&
567 		    (sctp_audit_data[i][1] == 0x01)) {
568 			cnt = 0;
569 			SCTP_PRINTF("\n");
570 		} else if (sctp_audit_data[i][0] == 0xf0) {
571 			cnt = 0;
572 			SCTP_PRINTF("\n");
573 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
574 		    (sctp_audit_data[i][1] == 0x01)) {
575 			SCTP_PRINTF("\n");
576 			cnt = 0;
577 		}
578 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
579 		    (uint32_t) sctp_audit_data[i][1]);
580 		cnt++;
581 		if ((cnt % 14) == 0)
582 			SCTP_PRINTF("\n");
583 	}
584 	SCTP_PRINTF("\n");
585 }
586 
587 void
588 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
589     struct sctp_nets *net)
590 {
591 	int resend_cnt, tot_out, rep, tot_book_cnt;
592 	struct sctp_nets *lnet;
593 	struct sctp_tmit_chunk *chk;
594 
595 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
596 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
597 	sctp_audit_indx++;
598 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
599 		sctp_audit_indx = 0;
600 	}
601 	if (inp == NULL) {
602 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
603 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
604 		sctp_audit_indx++;
605 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
606 			sctp_audit_indx = 0;
607 		}
608 		return;
609 	}
610 	if (stcb == NULL) {
611 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
612 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
613 		sctp_audit_indx++;
614 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
615 			sctp_audit_indx = 0;
616 		}
617 		return;
618 	}
619 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
620 	sctp_audit_data[sctp_audit_indx][1] =
621 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
622 	sctp_audit_indx++;
623 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
624 		sctp_audit_indx = 0;
625 	}
626 	rep = 0;
627 	tot_book_cnt = 0;
628 	resend_cnt = tot_out = 0;
629 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
630 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
631 			resend_cnt++;
632 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
633 			tot_out += chk->book_size;
634 			tot_book_cnt++;
635 		}
636 	}
637 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
638 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
639 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
640 		sctp_audit_indx++;
641 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
642 			sctp_audit_indx = 0;
643 		}
644 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
645 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
646 		rep = 1;
647 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
648 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
649 		sctp_audit_data[sctp_audit_indx][1] =
650 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
651 		sctp_audit_indx++;
652 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
653 			sctp_audit_indx = 0;
654 		}
655 	}
656 	if (tot_out != stcb->asoc.total_flight) {
657 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
658 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
659 		sctp_audit_indx++;
660 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
661 			sctp_audit_indx = 0;
662 		}
663 		rep = 1;
664 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
665 		    (int)stcb->asoc.total_flight);
666 		stcb->asoc.total_flight = tot_out;
667 	}
668 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
669 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
670 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
671 		sctp_audit_indx++;
672 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
673 			sctp_audit_indx = 0;
674 		}
675 		rep = 1;
676 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
677 
678 		stcb->asoc.total_flight_count = tot_book_cnt;
679 	}
680 	tot_out = 0;
681 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
682 		tot_out += lnet->flight_size;
683 	}
684 	if (tot_out != stcb->asoc.total_flight) {
685 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
686 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
687 		sctp_audit_indx++;
688 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
689 			sctp_audit_indx = 0;
690 		}
691 		rep = 1;
692 		SCTP_PRINTF("real flight:%d net total was %d\n",
693 		    stcb->asoc.total_flight, tot_out);
694 		/* now corrective action */
695 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
696 
697 			tot_out = 0;
698 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
699 				if ((chk->whoTo == lnet) &&
700 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
701 					tot_out += chk->book_size;
702 				}
703 			}
704 			if (lnet->flight_size != tot_out) {
705 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
706 				    (void *)lnet, lnet->flight_size,
707 				    tot_out);
708 				lnet->flight_size = tot_out;
709 			}
710 		}
711 	}
712 	if (rep) {
713 		sctp_print_audit_report();
714 	}
715 }
716 
717 void
718 sctp_audit_log(uint8_t ev, uint8_t fd)
719 {
720 
721 	sctp_audit_data[sctp_audit_indx][0] = ev;
722 	sctp_audit_data[sctp_audit_indx][1] = fd;
723 	sctp_audit_indx++;
724 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
725 		sctp_audit_indx = 0;
726 	}
727 }
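
/*
 * The audit machinery above keeps a small ring buffer
 * (sctp_audit_data[SCTP_AUDIT_SIZE][2]) of two-byte records: byte 0 is a
 * marker (0xAA on entry, 0xAF when a discrepancy is found, ...) and byte 1
 * carries the associated value.  sctp_auditing() recomputes the retransmit
 * count and per-net flight sizes from the sent queue and, when they
 * disagree with the association's cached totals, corrects the cached values
 * and dumps the ring via sctp_print_audit_report().
 */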
728 
729 #endif
730 
731 /*
732  * sctp_stop_timers_for_shutdown() should be called
733  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
734  * state to make sure that all timers are stopped.
735  */
736 void
737 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
738 {
739 	struct sctp_association *asoc;
740 	struct sctp_nets *net;
741 
742 	asoc = &stcb->asoc;
743 
744 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
745 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
746 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
747 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
748 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
749 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
750 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
751 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
752 	}
753 }
754 
755 /*
756  * a list of sizes based on typical MTUs, used only if the next hop's
757  * MTU is not returned.
758  */
759 static uint32_t sctp_mtu_sizes[] = {
760 	68,
761 	296,
762 	508,
763 	512,
764 	544,
765 	576,
766 	1006,
767 	1492,
768 	1500,
769 	1536,
770 	2002,
771 	2048,
772 	4352,
773 	4464,
774 	8166,
775 	17914,
776 	32000,
777 	65535
778 };
779 
780 /*
781  * Return the largest MTU smaller than val. If there is no
782  * entry, just return val.
783  */
784 uint32_t
785 sctp_get_prev_mtu(uint32_t val)
786 {
787 	uint32_t i;
788 
789 	if (val <= sctp_mtu_sizes[0]) {
790 		return (val);
791 	}
792 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
793 		if (val <= sctp_mtu_sizes[i]) {
794 			break;
795 		}
796 	}
797 	return (sctp_mtu_sizes[i - 1]);
798 }
799 
800 /*
801  * Return the smallest MTU larger than val. If there is no
802  * entry, just return val.
803  */
804 uint32_t
805 sctp_get_next_mtu(uint32_t val)
806 {
807 	/* select another MTU that is just bigger than this one */
808 	uint32_t i;
809 
810 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
811 		if (val < sctp_mtu_sizes[i]) {
812 			return (sctp_mtu_sizes[i]);
813 		}
814 	}
815 	return (val);
816 }
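
/*
 * A quick worked example for the two lookups above (values from
 * sctp_mtu_sizes): sctp_get_prev_mtu(1500) steps down to 1492 and
 * sctp_get_next_mtu(1500) steps up to 1536, while sctp_get_prev_mtu(9000)
 * yields 8166.  Values at or below the smallest entry (68) come back
 * unchanged from sctp_get_prev_mtu, and values at or above the largest
 * entry (65535) come back unchanged from sctp_get_next_mtu.
 */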
817 
818 void
819 sctp_fill_random_store(struct sctp_pcb *m)
820 {
821 	/*
822 	 * Here we use MD5/SHA-1 to hash our good random numbers and our
823 	 * counter. The result becomes our good random numbers and we then
824 	 * set up to give these out. Note that we do no locking to protect
825 	 * this. This is OK, since if competing folks call this we will get
826 	 * more gobbledygook in the random store, which is what we want.
827 	 * There is a danger that two callers will use the same random
828 	 * numbers, but that's OK too since that is random as well :->
829 	 */
830 	m->store_at = 0;
831 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
832 	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
833 	    sizeof(m->random_counter), (uint8_t *) m->random_store);
834 	m->random_counter++;
835 }
836 
837 uint32_t
838 sctp_select_initial_TSN(struct sctp_pcb *inp)
839 {
840 	/*
841 	 * A true implementation should use a random selection process to
842 	 * get the initial stream sequence number, using RFC 1750 as a
843 	 * good guideline.
844 	 */
845 	uint32_t x, *xp;
846 	uint8_t *p;
847 	int store_at, new_store;
848 
849 	if (inp->initial_sequence_debug != 0) {
850 		uint32_t ret;
851 
852 		ret = inp->initial_sequence_debug;
853 		inp->initial_sequence_debug++;
854 		return (ret);
855 	}
856 retry:
857 	store_at = inp->store_at;
858 	new_store = store_at + sizeof(uint32_t);
859 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
860 		new_store = 0;
861 	}
862 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
863 		goto retry;
864 	}
865 	if (new_store == 0) {
866 		/* Refill the random store */
867 		sctp_fill_random_store(inp);
868 	}
869 	p = &inp->random_store[store_at];
870 	xp = (uint32_t *) p;
871 	x = *xp;
872 	return (x);
873 }
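
/*
 * How the random store is consumed above: inp->store_at indexes into
 * random_store in 4-byte steps and is advanced with an atomic
 * compare-and-set, so concurrent callers normally claim distinct slots.
 * Once the index wraps to 0, sctp_fill_random_store() rehashes the store
 * with an incremented counter, producing a fresh batch of digest output
 * (the small refill race is tolerated, as noted in that function's
 * comment).
 */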
874 
875 uint32_t
876 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
877 {
878 	uint32_t x;
879 	struct timeval now;
880 
881 	if (check) {
882 		(void)SCTP_GETTIME_TIMEVAL(&now);
883 	}
884 	for (;;) {
885 		x = sctp_select_initial_TSN(&inp->sctp_ep);
886 		if (x == 0) {
887 			/* we never use 0 */
888 			continue;
889 		}
890 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
891 			break;
892 		}
893 	}
894 	return (x);
895 }
896 
897 int32_t
898 sctp_map_assoc_state(int kernel_state)
899 {
900 	int32_t user_state;
901 
902 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
903 		user_state = SCTP_CLOSED;
904 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
905 		user_state = SCTP_SHUTDOWN_PENDING;
906 	} else {
907 		switch (kernel_state & SCTP_STATE_MASK) {
908 		case SCTP_STATE_EMPTY:
909 			user_state = SCTP_CLOSED;
910 			break;
911 		case SCTP_STATE_INUSE:
912 			user_state = SCTP_CLOSED;
913 			break;
914 		case SCTP_STATE_COOKIE_WAIT:
915 			user_state = SCTP_COOKIE_WAIT;
916 			break;
917 		case SCTP_STATE_COOKIE_ECHOED:
918 			user_state = SCTP_COOKIE_ECHOED;
919 			break;
920 		case SCTP_STATE_OPEN:
921 			user_state = SCTP_ESTABLISHED;
922 			break;
923 		case SCTP_STATE_SHUTDOWN_SENT:
924 			user_state = SCTP_SHUTDOWN_SENT;
925 			break;
926 		case SCTP_STATE_SHUTDOWN_RECEIVED:
927 			user_state = SCTP_SHUTDOWN_RECEIVED;
928 			break;
929 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
930 			user_state = SCTP_SHUTDOWN_ACK_SENT;
931 			break;
932 		default:
933 			user_state = SCTP_CLOSED;
934 			break;
935 		}
936 	}
937 	return (user_state);
938 }
939 
940 int
941 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
942     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
943 {
944 	struct sctp_association *asoc;
945 
946 	/*
947 	 * Anything set to zero is taken care of by the allocation routine's
948 	 * bzero
949 	 */
950 
951 	/*
952 	 * Up front select what scoping to apply on addresses I tell my peer.
953 	 * Not sure what to do with these right now; we will need to come up
954 	 * with a way to set them. We may need to pass them through from the
955 	 * caller in the sctp_aloc_assoc() function.
956 	 */
957 	int i;
958 #if defined(SCTP_DETAILED_STR_STATS)
959 	int j;
960 #endif
961 
962 	asoc = &stcb->asoc;
963 	/* init all variables to a known value. */
964 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
965 	asoc->max_burst = inp->sctp_ep.max_burst;
966 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
967 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
968 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
969 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
970 	asoc->ecn_supported = inp->ecn_supported;
971 	asoc->prsctp_supported = inp->prsctp_supported;
972 	asoc->idata_supported = inp->idata_supported;
973 	asoc->auth_supported = inp->auth_supported;
974 	asoc->asconf_supported = inp->asconf_supported;
975 	asoc->reconfig_supported = inp->reconfig_supported;
976 	asoc->nrsack_supported = inp->nrsack_supported;
977 	asoc->pktdrop_supported = inp->pktdrop_supported;
979 	asoc->sctp_cmt_pf = (uint8_t) 0;
980 	asoc->sctp_frag_point = inp->sctp_frag_point;
981 	asoc->sctp_features = inp->sctp_features;
982 	asoc->default_dscp = inp->sctp_ep.default_dscp;
983 	asoc->max_cwnd = inp->max_cwnd;
984 #ifdef INET6
985 	if (inp->sctp_ep.default_flowlabel) {
986 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
987 	} else {
988 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
989 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
990 			asoc->default_flowlabel &= 0x000fffff;
991 			asoc->default_flowlabel |= 0x80000000;
992 		} else {
993 			asoc->default_flowlabel = 0;
994 		}
995 	}
996 #endif
997 	asoc->sb_send_resv = 0;
998 	if (override_tag) {
999 		asoc->my_vtag = override_tag;
1000 	} else {
1001 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1002 	}
1003 	/* Get the nonce tags */
1004 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1005 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1006 	asoc->vrf_id = vrf_id;
1007 
1008 #ifdef SCTP_ASOCLOG_OF_TSNS
1009 	asoc->tsn_in_at = 0;
1010 	asoc->tsn_out_at = 0;
1011 	asoc->tsn_in_wrapped = 0;
1012 	asoc->tsn_out_wrapped = 0;
1013 	asoc->cumack_log_at = 0;
1014 	asoc->cumack_log_atsnt = 0;
1015 #endif
1016 #ifdef SCTP_FS_SPEC_LOG
1017 	asoc->fs_index = 0;
1018 #endif
1019 	asoc->refcnt = 0;
1020 	asoc->assoc_up_sent = 0;
1021 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1022 	    sctp_select_initial_TSN(&inp->sctp_ep);
1023 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1024 	/* we are optimistic here */
1025 	asoc->peer_supports_nat = 0;
1026 	asoc->sent_queue_retran_cnt = 0;
1027 
1028 	/* for CMT */
1029 	asoc->last_net_cmt_send_started = NULL;
1030 
1031 	/* This will need to be adjusted */
1032 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1033 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1034 	asoc->asconf_seq_in = asoc->last_acked_seq;
1035 
1036 	/* here we are different, we hold the next one we expect */
1037 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1038 
1039 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1040 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1041 
1042 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1043 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1044 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1045 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1046 	asoc->free_chunk_cnt = 0;
1047 
1048 	asoc->iam_blocking = 0;
1049 	asoc->context = inp->sctp_context;
1050 	asoc->local_strreset_support = inp->local_strreset_support;
1051 	asoc->def_send = inp->def_send;
1052 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1053 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1054 	asoc->pr_sctp_cnt = 0;
1055 	asoc->total_output_queue_size = 0;
1056 
1057 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1058 		asoc->scope.ipv6_addr_legal = 1;
1059 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1060 			asoc->scope.ipv4_addr_legal = 1;
1061 		} else {
1062 			asoc->scope.ipv4_addr_legal = 0;
1063 		}
1064 	} else {
1065 		asoc->scope.ipv6_addr_legal = 0;
1066 		asoc->scope.ipv4_addr_legal = 1;
1067 	}
1068 
1069 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1070 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1071 
1072 	asoc->smallest_mtu = inp->sctp_frag_point;
1073 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1074 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1075 
1076 	asoc->stream_locked_on = 0;
1077 	asoc->ecn_echo_cnt_onq = 0;
1078 	asoc->stream_locked = 0;
1079 
1080 	asoc->send_sack = 1;
1081 
1082 	LIST_INIT(&asoc->sctp_restricted_addrs);
1083 
1084 	TAILQ_INIT(&asoc->nets);
1085 	TAILQ_INIT(&asoc->pending_reply_queue);
1086 	TAILQ_INIT(&asoc->asconf_ack_sent);
1087 	/* Set up to fill the hb random cache at first HB */
1088 	asoc->hb_random_idx = 4;
1089 
1090 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1091 
1092 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1093 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1094 
1095 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1096 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1097 
1098 	/*
1099 	 * Now the stream parameters, here we allocate space for all streams
1100 	 * that we request by default.
1101 	 */
1102 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1103 	    o_strms;
1104 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1105 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1106 	    SCTP_M_STRMO);
1107 	if (asoc->strmout == NULL) {
1108 		/* big trouble no memory */
1109 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1110 		return (ENOMEM);
1111 	}
1112 	for (i = 0; i < asoc->streamoutcnt; i++) {
1113 		/*
1114 		 * The inbound side must be set to 0xffff. Also NOTE: when we
1115 		 * get the INIT-ACK back (for the INIT sender) we MUST reduce
1116 		 * the count (streamoutcnt), but first check whether we sent
1117 		 * to any of the upper streams that were dropped (if some
1118 		 * were). Those that were dropped must be reported to the
1119 		 * upper layer as having failed to send.
1120 		 */
1121 		asoc->strmout[i].next_mid_ordered = 0;
1122 		asoc->strmout[i].next_mid_unordered = 0;
1123 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1124 		asoc->strmout[i].chunks_on_queues = 0;
1125 #if defined(SCTP_DETAILED_STR_STATS)
1126 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1127 			asoc->strmout[i].abandoned_sent[j] = 0;
1128 			asoc->strmout[i].abandoned_unsent[j] = 0;
1129 		}
1130 #else
1131 		asoc->strmout[i].abandoned_sent[0] = 0;
1132 		asoc->strmout[i].abandoned_unsent[0] = 0;
1133 #endif
1134 		asoc->strmout[i].sid = i;
1135 		asoc->strmout[i].last_msg_incomplete = 0;
1136 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1137 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1138 	}
1139 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1140 
1141 	/* Now the mapping array */
1142 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1143 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1144 	    SCTP_M_MAP);
1145 	if (asoc->mapping_array == NULL) {
1146 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1147 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1148 		return (ENOMEM);
1149 	}
1150 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1151 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1152 	    SCTP_M_MAP);
1153 	if (asoc->nr_mapping_array == NULL) {
1154 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1155 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1156 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1157 		return (ENOMEM);
1158 	}
1159 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1160 
1161 	/* Now the init of the other outqueues */
1162 	TAILQ_INIT(&asoc->free_chunks);
1163 	TAILQ_INIT(&asoc->control_send_queue);
1164 	TAILQ_INIT(&asoc->asconf_send_queue);
1165 	TAILQ_INIT(&asoc->send_queue);
1166 	TAILQ_INIT(&asoc->sent_queue);
1167 	TAILQ_INIT(&asoc->resetHead);
1168 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1169 	TAILQ_INIT(&asoc->asconf_queue);
1170 	/* authentication fields */
1171 	asoc->authinfo.random = NULL;
1172 	asoc->authinfo.active_keyid = 0;
1173 	asoc->authinfo.assoc_key = NULL;
1174 	asoc->authinfo.assoc_keyid = 0;
1175 	asoc->authinfo.recv_key = NULL;
1176 	asoc->authinfo.recv_keyid = 0;
1177 	LIST_INIT(&asoc->shared_keys);
1178 	asoc->marked_retrans = 0;
1179 	asoc->port = inp->sctp_ep.port;
1180 	asoc->timoinit = 0;
1181 	asoc->timodata = 0;
1182 	asoc->timosack = 0;
1183 	asoc->timoshutdown = 0;
1184 	asoc->timoheartbeat = 0;
1185 	asoc->timocookie = 0;
1186 	asoc->timoshutdownack = 0;
1187 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1188 	asoc->discontinuity_time = asoc->start_time;
1189 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1190 		asoc->abandoned_unsent[i] = 0;
1191 		asoc->abandoned_sent[i] = 0;
1192 	}
1193 	/*
1194 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1195 	 * freed later when the association is freed.
1196 	 */
1197 	return (0);
1198 }
1199 
1200 void
1201 sctp_print_mapping_array(struct sctp_association *asoc)
1202 {
1203 	unsigned int i, limit;
1204 
1205 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1206 	    asoc->mapping_array_size,
1207 	    asoc->mapping_array_base_tsn,
1208 	    asoc->cumulative_tsn,
1209 	    asoc->highest_tsn_inside_map,
1210 	    asoc->highest_tsn_inside_nr_map);
1211 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1212 		if (asoc->mapping_array[limit - 1] != 0) {
1213 			break;
1214 		}
1215 	}
1216 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1217 	for (i = 0; i < limit; i++) {
1218 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1219 	}
1220 	if (limit % 16)
1221 		SCTP_PRINTF("\n");
1222 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1223 		if (asoc->nr_mapping_array[limit - 1]) {
1224 			break;
1225 		}
1226 	}
1227 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1228 	for (i = 0; i < limit; i++) {
1229 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1230 	}
1231 	if (limit % 16)
1232 		SCTP_PRINTF("\n");
1233 }
1234 
1235 int
1236 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1237 {
1238 	/* mapping array needs to grow */
1239 	uint8_t *new_array1, *new_array2;
1240 	uint32_t new_size;
1241 
1242 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1243 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1244 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1245 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1246 		/* can't get more, forget it */
1247 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1248 		if (new_array1) {
1249 			SCTP_FREE(new_array1, SCTP_M_MAP);
1250 		}
1251 		if (new_array2) {
1252 			SCTP_FREE(new_array2, SCTP_M_MAP);
1253 		}
1254 		return (-1);
1255 	}
1256 	memset(new_array1, 0, new_size);
1257 	memset(new_array2, 0, new_size);
1258 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1259 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1260 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1261 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1262 	asoc->mapping_array = new_array1;
1263 	asoc->nr_mapping_array = new_array2;
1264 	asoc->mapping_array_size = new_size;
1265 	return (0);
1266 }
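
/*
 * Sizing sketch for the expansion above (figures illustrative only):
 * "needed" is a count of additional TSN slots, i.e. bits, so
 * (needed + 7) / 8 rounds it up to whole bytes of bitmap; asking for room
 * for 100 more TSNs adds 13 bytes plus the SCTP_MAPPING_ARRAY_INCR slack
 * on top of the current size.  Both the renegable and non-renegable maps
 * are reallocated together so they always stay the same length.
 */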
1267 
1268 
1269 static void
1270 sctp_iterator_work(struct sctp_iterator *it)
1271 {
1272 	int iteration_count = 0;
1273 	int inp_skip = 0;
1274 	int first_in = 1;
1275 	struct sctp_inpcb *tinp;
1276 
1277 	SCTP_INP_INFO_RLOCK();
1278 	SCTP_ITERATOR_LOCK();
1279 	sctp_it_ctl.cur_it = it;
1280 	if (it->inp) {
1281 		SCTP_INP_RLOCK(it->inp);
1282 		SCTP_INP_DECR_REF(it->inp);
1283 	}
1284 	if (it->inp == NULL) {
1285 		/* iterator is complete */
1286 done_with_iterator:
1287 		sctp_it_ctl.cur_it = NULL;
1288 		SCTP_ITERATOR_UNLOCK();
1289 		SCTP_INP_INFO_RUNLOCK();
1290 		if (it->function_atend != NULL) {
1291 			(*it->function_atend) (it->pointer, it->val);
1292 		}
1293 		SCTP_FREE(it, SCTP_M_ITER);
1294 		return;
1295 	}
1296 select_a_new_ep:
1297 	if (first_in) {
1298 		first_in = 0;
1299 	} else {
1300 		SCTP_INP_RLOCK(it->inp);
1301 	}
1302 	while (((it->pcb_flags) &&
1303 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1304 	    ((it->pcb_features) &&
1305 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1306 		/* endpoint flags or features don't match, so keep looking */
1307 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1308 			SCTP_INP_RUNLOCK(it->inp);
1309 			goto done_with_iterator;
1310 		}
1311 		tinp = it->inp;
1312 		it->inp = LIST_NEXT(it->inp, sctp_list);
1313 		SCTP_INP_RUNLOCK(tinp);
1314 		if (it->inp == NULL) {
1315 			goto done_with_iterator;
1316 		}
1317 		SCTP_INP_RLOCK(it->inp);
1318 	}
1319 	/* now go through each assoc which is in the desired state */
1320 	if (it->done_current_ep == 0) {
1321 		if (it->function_inp != NULL)
1322 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1323 		it->done_current_ep = 1;
1324 	}
1325 	if (it->stcb == NULL) {
1326 		/* run the per instance function */
1327 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1328 	}
1329 	if ((inp_skip) || it->stcb == NULL) {
1330 		if (it->function_inp_end != NULL) {
1331 			inp_skip = (*it->function_inp_end) (it->inp,
1332 			    it->pointer,
1333 			    it->val);
1334 		}
1335 		SCTP_INP_RUNLOCK(it->inp);
1336 		goto no_stcb;
1337 	}
1338 	while (it->stcb) {
1339 		SCTP_TCB_LOCK(it->stcb);
1340 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1341 			/* not in the right state... keep looking */
1342 			SCTP_TCB_UNLOCK(it->stcb);
1343 			goto next_assoc;
1344 		}
1345 		/* see if we have limited out the iterator loop */
1346 		iteration_count++;
1347 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1348 			/* Pause to let others grab the lock */
1349 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1350 			SCTP_TCB_UNLOCK(it->stcb);
1351 			SCTP_INP_INCR_REF(it->inp);
1352 			SCTP_INP_RUNLOCK(it->inp);
1353 			SCTP_ITERATOR_UNLOCK();
1354 			SCTP_INP_INFO_RUNLOCK();
1355 			SCTP_INP_INFO_RLOCK();
1356 			SCTP_ITERATOR_LOCK();
1357 			if (sctp_it_ctl.iterator_flags) {
1358 				/* We won't be staying here */
1359 				SCTP_INP_DECR_REF(it->inp);
1360 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1361 				if (sctp_it_ctl.iterator_flags &
1362 				    SCTP_ITERATOR_STOP_CUR_IT) {
1363 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1364 					goto done_with_iterator;
1365 				}
1366 				if (sctp_it_ctl.iterator_flags &
1367 				    SCTP_ITERATOR_STOP_CUR_INP) {
1368 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1369 					goto no_stcb;
1370 				}
1371 				/* If we reach here huh? */
1372 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1373 				    sctp_it_ctl.iterator_flags);
1374 				sctp_it_ctl.iterator_flags = 0;
1375 			}
1376 			SCTP_INP_RLOCK(it->inp);
1377 			SCTP_INP_DECR_REF(it->inp);
1378 			SCTP_TCB_LOCK(it->stcb);
1379 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1380 			iteration_count = 0;
1381 		}
1382 		/* run function on this one */
1383 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1384 
1385 		/*
1386 		 * we lie here, it really needs to have its own type but
1387 		 * first I must verify that this won't affect things :-0
1388 		 */
1389 		if (it->no_chunk_output == 0)
1390 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1391 
1392 		SCTP_TCB_UNLOCK(it->stcb);
1393 next_assoc:
1394 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1395 		if (it->stcb == NULL) {
1396 			/* Run last function */
1397 			if (it->function_inp_end != NULL) {
1398 				inp_skip = (*it->function_inp_end) (it->inp,
1399 				    it->pointer,
1400 				    it->val);
1401 			}
1402 		}
1403 	}
1404 	SCTP_INP_RUNLOCK(it->inp);
1405 no_stcb:
1406 	/* done with all assocs on this endpoint, move on to next endpoint */
1407 	it->done_current_ep = 0;
1408 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1409 		it->inp = NULL;
1410 	} else {
1411 		it->inp = LIST_NEXT(it->inp, sctp_list);
1412 	}
1413 	if (it->inp == NULL) {
1414 		goto done_with_iterator;
1415 	}
1416 	goto select_a_new_ep;
1417 }
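
/*
 * Locking sketch for sctp_iterator_work() above: the INP info read lock and
 * the iterator lock are held for the walk, and every
 * SCTP_ITERATOR_MAX_AT_ONCE associations the worker pauses: it takes a
 * refcount on the current stcb and inp, drops the TCB/INP/iterator/info
 * locks so other threads can make progress, then reacquires them and honors
 * any SCTP_ITERATOR_STOP_CUR_IT or SCTP_ITERATOR_STOP_CUR_INP request that
 * arrived in the meantime.
 */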
1418 
1419 void
1420 sctp_iterator_worker(void)
1421 {
1422 	struct sctp_iterator *it, *nit;
1423 
1424 	/* This function is called with the WQ lock in place */
1425 
1426 	sctp_it_ctl.iterator_running = 1;
1427 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1428 		/* now let's work on this one */
1429 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1430 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1431 		CURVNET_SET(it->vn);
1432 		sctp_iterator_work(it);
1433 		CURVNET_RESTORE();
1434 		SCTP_IPI_ITERATOR_WQ_LOCK();
1435 		/* sa_ignore FREED_MEMORY */
1436 	}
1437 	sctp_it_ctl.iterator_running = 0;
1438 	return;
1439 }
1440 
1441 
1442 static void
1443 sctp_handle_addr_wq(void)
1444 {
1445 	/* deal with the ADDR wq from the rtsock calls */
1446 	struct sctp_laddr *wi, *nwi;
1447 	struct sctp_asconf_iterator *asc;
1448 
1449 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1450 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1451 	if (asc == NULL) {
1452 		/* Try later, no memory */
1453 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1454 		    (struct sctp_inpcb *)NULL,
1455 		    (struct sctp_tcb *)NULL,
1456 		    (struct sctp_nets *)NULL);
1457 		return;
1458 	}
1459 	LIST_INIT(&asc->list_of_work);
1460 	asc->cnt = 0;
1461 
1462 	SCTP_WQ_ADDR_LOCK();
1463 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1464 		LIST_REMOVE(wi, sctp_nxt_addr);
1465 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1466 		asc->cnt++;
1467 	}
1468 	SCTP_WQ_ADDR_UNLOCK();
1469 
1470 	if (asc->cnt == 0) {
1471 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1472 	} else {
1473 		int ret;
1474 
1475 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1476 		    sctp_asconf_iterator_stcb,
1477 		    NULL,	/* No ep end for boundall */
1478 		    SCTP_PCB_FLAGS_BOUNDALL,
1479 		    SCTP_PCB_ANY_FEATURES,
1480 		    SCTP_ASOC_ANY_STATE,
1481 		    (void *)asc, 0,
1482 		    sctp_asconf_iterator_end, NULL, 0);
1483 		if (ret) {
1484 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1485 			/* Free the work if we are stopping; otherwise put it
1486 			 * back on the addr_wq. */
1487 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1488 				sctp_asconf_iterator_end(asc, 0);
1489 			} else {
1490 				SCTP_WQ_ADDR_LOCK();
1491 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1492 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1493 				}
1494 				SCTP_WQ_ADDR_UNLOCK();
1495 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1496 			}
1497 		}
1498 	}
1499 }
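
/*
 * Flow of sctp_handle_addr_wq() above: pending rtsock address events are
 * moved off the global addr_wq under the WQ lock into a private
 * sctp_asconf_iterator, and an iterator job is then started to apply them
 * to every bound-all endpoint and its associations.  If the iterator
 * cannot be started, the work is either handed straight to
 * sctp_asconf_iterator_end() (when the stack is shutting down) or pushed
 * back onto addr_wq to be retried later.
 */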
1500 
1501 void
1502 sctp_timeout_handler(void *t)
1503 {
1504 	struct sctp_inpcb *inp;
1505 	struct sctp_tcb *stcb;
1506 	struct sctp_nets *net;
1507 	struct sctp_timer *tmr;
1508 	struct mbuf *op_err;
1509 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1510 	struct socket *so;
1511 #endif
1512 	int did_output;
1513 	int type;
1514 
1515 	tmr = (struct sctp_timer *)t;
1516 	inp = (struct sctp_inpcb *)tmr->ep;
1517 	stcb = (struct sctp_tcb *)tmr->tcb;
1518 	net = (struct sctp_nets *)tmr->net;
1519 	CURVNET_SET((struct vnet *)tmr->vnet);
1520 	did_output = 1;
1521 
1522 #ifdef SCTP_AUDITING_ENABLED
1523 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1524 	sctp_auditing(3, inp, stcb, net);
1525 #endif
1526 
1527 	/* sanity checks... */
1528 	if (tmr->self != (void *)tmr) {
1529 		/*
1530 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1531 		 * (void *)tmr);
1532 		 */
1533 		CURVNET_RESTORE();
1534 		return;
1535 	}
1536 	tmr->stopped_from = 0xa001;
1537 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1538 		/*
1539 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1540 		 * tmr->type);
1541 		 */
1542 		CURVNET_RESTORE();
1543 		return;
1544 	}
1545 	tmr->stopped_from = 0xa002;
1546 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1547 		CURVNET_RESTORE();
1548 		return;
1549 	}
1550 	/* if this is an iterator timeout, get the struct and clear inp */
1551 	tmr->stopped_from = 0xa003;
1552 	if (inp) {
1553 		SCTP_INP_INCR_REF(inp);
1554 		if ((inp->sctp_socket == NULL) &&
1555 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1556 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1557 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1558 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1559 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1560 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1561 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1562 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1563 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1564 		    ) {
1565 			SCTP_INP_DECR_REF(inp);
1566 			CURVNET_RESTORE();
1567 			return;
1568 		}
1569 	}
1570 	tmr->stopped_from = 0xa004;
1571 	if (stcb) {
1572 		atomic_add_int(&stcb->asoc.refcnt, 1);
1573 		if (stcb->asoc.state == 0) {
1574 			atomic_add_int(&stcb->asoc.refcnt, -1);
1575 			if (inp) {
1576 				SCTP_INP_DECR_REF(inp);
1577 			}
1578 			CURVNET_RESTORE();
1579 			return;
1580 		}
1581 	}
1582 	type = tmr->type;
1583 	tmr->stopped_from = 0xa005;
1584 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1585 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1586 		if (inp) {
1587 			SCTP_INP_DECR_REF(inp);
1588 		}
1589 		if (stcb) {
1590 			atomic_add_int(&stcb->asoc.refcnt, -1);
1591 		}
1592 		CURVNET_RESTORE();
1593 		return;
1594 	}
1595 	tmr->stopped_from = 0xa006;
1596 
1597 	if (stcb) {
1598 		SCTP_TCB_LOCK(stcb);
1599 		atomic_add_int(&stcb->asoc.refcnt, -1);
1600 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1601 		    ((stcb->asoc.state == 0) ||
1602 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1603 			SCTP_TCB_UNLOCK(stcb);
1604 			if (inp) {
1605 				SCTP_INP_DECR_REF(inp);
1606 			}
1607 			CURVNET_RESTORE();
1608 			return;
1609 		}
1610 	}
1611 	/* record in stopped what t-o occurred */
1612 	/* record in stopped_from which timeout type occurred */
1613 
1614 	/* mark as being serviced now */
1615 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1616 		/*
1617 		 * Callout has been rescheduled.
1618 		 */
1619 		goto get_out;
1620 	}
1621 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1622 		/*
1623 		 * Not active, so no action.
1624 		 */
1625 		goto get_out;
1626 	}
1627 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1628 
1629 	/* call the handler for the appropriate timer type */
1630 	switch (type) {
1631 	case SCTP_TIMER_TYPE_ZERO_COPY:
1632 		if (inp == NULL) {
1633 			break;
1634 		}
1635 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1636 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1637 		}
1638 		break;
1639 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1640 		if (inp == NULL) {
1641 			break;
1642 		}
1643 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1644 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1645 		}
1646 		break;
1647 	case SCTP_TIMER_TYPE_ADDR_WQ:
1648 		sctp_handle_addr_wq();
1649 		break;
1650 	case SCTP_TIMER_TYPE_SEND:
1651 		if ((stcb == NULL) || (inp == NULL)) {
1652 			break;
1653 		}
1654 		SCTP_STAT_INCR(sctps_timodata);
1655 		stcb->asoc.timodata++;
1656 		stcb->asoc.num_send_timers_up--;
1657 		if (stcb->asoc.num_send_timers_up < 0) {
1658 			stcb->asoc.num_send_timers_up = 0;
1659 		}
1660 		SCTP_TCB_LOCK_ASSERT(stcb);
1661 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1662 			/* no need to unlock on tcb, it's gone */
1663 
1664 			goto out_decr;
1665 		}
1666 		SCTP_TCB_LOCK_ASSERT(stcb);
1667 #ifdef SCTP_AUDITING_ENABLED
1668 		sctp_auditing(4, inp, stcb, net);
1669 #endif
1670 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1671 		if ((stcb->asoc.num_send_timers_up == 0) &&
1672 		    (stcb->asoc.sent_queue_cnt > 0)) {
1673 			struct sctp_tmit_chunk *chk;
1674 
1675 			/*
1676 			 * safeguard. If there are chunks on the sent queue
1677 			 * somewhere but no timers running, something is
1678 			 * wrong... so we start a timer on the first chunk
1679 			 * on the sent queue on whatever net it is sent to.
1680 			 */
1681 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1682 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1683 			    chk->whoTo);
1684 		}
1685 		break;
1686 	case SCTP_TIMER_TYPE_INIT:
1687 		if ((stcb == NULL) || (inp == NULL)) {
1688 			break;
1689 		}
1690 		SCTP_STAT_INCR(sctps_timoinit);
1691 		stcb->asoc.timoinit++;
1692 		if (sctp_t1init_timer(inp, stcb, net)) {
1693 			/* no need to unlock on tcb, it's gone */
1694 			goto out_decr;
1695 		}
1696 		/* We do output but not here */
1697 		did_output = 0;
1698 		break;
1699 	case SCTP_TIMER_TYPE_RECV:
1700 		if ((stcb == NULL) || (inp == NULL)) {
1701 			break;
1702 		}
1703 		SCTP_STAT_INCR(sctps_timosack);
1704 		stcb->asoc.timosack++;
1705 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1706 #ifdef SCTP_AUDITING_ENABLED
1707 		sctp_auditing(4, inp, stcb, net);
1708 #endif
1709 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1710 		break;
1711 	case SCTP_TIMER_TYPE_SHUTDOWN:
1712 		if ((stcb == NULL) || (inp == NULL)) {
1713 			break;
1714 		}
1715 		if (sctp_shutdown_timer(inp, stcb, net)) {
1716 			/* no need to unlock on tcb, it's gone */
1717 			goto out_decr;
1718 		}
1719 		SCTP_STAT_INCR(sctps_timoshutdown);
1720 		stcb->asoc.timoshutdown++;
1721 #ifdef SCTP_AUDITING_ENABLED
1722 		sctp_auditing(4, inp, stcb, net);
1723 #endif
1724 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1725 		break;
1726 	case SCTP_TIMER_TYPE_HEARTBEAT:
1727 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1728 			break;
1729 		}
1730 		SCTP_STAT_INCR(sctps_timoheartbeat);
1731 		stcb->asoc.timoheartbeat++;
1732 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1733 			/* no need to unlock on tcb, it's gone */
1734 			goto out_decr;
1735 		}
1736 #ifdef SCTP_AUDITING_ENABLED
1737 		sctp_auditing(4, inp, stcb, net);
1738 #endif
1739 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1740 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1741 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1742 		}
1743 		break;
1744 	case SCTP_TIMER_TYPE_COOKIE:
1745 		if ((stcb == NULL) || (inp == NULL)) {
1746 			break;
1747 		}
1748 		if (sctp_cookie_timer(inp, stcb, net)) {
1749 			/* no need to unlock the tcb, it's gone */
1750 			goto out_decr;
1751 		}
1752 		SCTP_STAT_INCR(sctps_timocookie);
1753 		stcb->asoc.timocookie++;
1754 #ifdef SCTP_AUDITING_ENABLED
1755 		sctp_auditing(4, inp, stcb, net);
1756 #endif
1757 		/*
1758 		 * We consider the T3 and Cookie timers pretty much the same
1759 		 * with respect to the "from" value passed to chunk_output.
1760 		 */
1761 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1762 		break;
1763 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1764 		{
1765 			struct timeval tv;
1766 			int i, secret;
1767 
1768 			if (inp == NULL) {
1769 				break;
1770 			}
1771 			SCTP_STAT_INCR(sctps_timosecret);
1772 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1773 			SCTP_INP_WLOCK(inp);
1774 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1775 			inp->sctp_ep.last_secret_number =
1776 			    inp->sctp_ep.current_secret_number;
1777 			inp->sctp_ep.current_secret_number++;
1778 			if (inp->sctp_ep.current_secret_number >=
1779 			    SCTP_HOW_MANY_SECRETS) {
1780 				inp->sctp_ep.current_secret_number = 0;
1781 			}
1782 			secret = (int)inp->sctp_ep.current_secret_number;
1783 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1784 				inp->sctp_ep.secret_key[secret][i] =
1785 				    sctp_select_initial_TSN(&inp->sctp_ep);
1786 			}
1787 			SCTP_INP_WUNLOCK(inp);
1788 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1789 		}
1790 		did_output = 0;
1791 		break;
1792 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1793 		if ((stcb == NULL) || (inp == NULL)) {
1794 			break;
1795 		}
1796 		SCTP_STAT_INCR(sctps_timopathmtu);
1797 		sctp_pathmtu_timer(inp, stcb, net);
1798 		did_output = 0;
1799 		break;
1800 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1801 		if ((stcb == NULL) || (inp == NULL)) {
1802 			break;
1803 		}
1804 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1805 			/* no need to unlock the tcb, it's gone */
1806 			goto out_decr;
1807 		}
1808 		SCTP_STAT_INCR(sctps_timoshutdownack);
1809 		stcb->asoc.timoshutdownack++;
1810 #ifdef SCTP_AUDITING_ENABLED
1811 		sctp_auditing(4, inp, stcb, net);
1812 #endif
1813 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1814 		break;
1815 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1816 		if ((stcb == NULL) || (inp == NULL)) {
1817 			break;
1818 		}
1819 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1820 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1821 		    "Shutdown guard timer expired");
1822 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1823 		/* no need to unlock the tcb, it's gone */
1824 		goto out_decr;
1825 
1826 	case SCTP_TIMER_TYPE_STRRESET:
1827 		if ((stcb == NULL) || (inp == NULL)) {
1828 			break;
1829 		}
1830 		if (sctp_strreset_timer(inp, stcb, net)) {
1831 			/* no need to unlock the tcb, it's gone */
1832 			goto out_decr;
1833 		}
1834 		SCTP_STAT_INCR(sctps_timostrmrst);
1835 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1836 		break;
1837 	case SCTP_TIMER_TYPE_ASCONF:
1838 		if ((stcb == NULL) || (inp == NULL)) {
1839 			break;
1840 		}
1841 		if (sctp_asconf_timer(inp, stcb, net)) {
1842 			/* no need to unlock the tcb, it's gone */
1843 			goto out_decr;
1844 		}
1845 		SCTP_STAT_INCR(sctps_timoasconf);
1846 #ifdef SCTP_AUDITING_ENABLED
1847 		sctp_auditing(4, inp, stcb, net);
1848 #endif
1849 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1850 		break;
1851 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1852 		if ((stcb == NULL) || (inp == NULL)) {
1853 			break;
1854 		}
1855 		sctp_delete_prim_timer(inp, stcb, net);
1856 		SCTP_STAT_INCR(sctps_timodelprim);
1857 		break;
1858 
1859 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1860 		if ((stcb == NULL) || (inp == NULL)) {
1861 			break;
1862 		}
1863 		SCTP_STAT_INCR(sctps_timoautoclose);
1864 		sctp_autoclose_timer(inp, stcb, net);
1865 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1866 		did_output = 0;
1867 		break;
1868 	case SCTP_TIMER_TYPE_ASOCKILL:
1869 		if ((stcb == NULL) || (inp == NULL)) {
1870 			break;
1871 		}
1872 		SCTP_STAT_INCR(sctps_timoassockill);
1873 		/* Can we free it yet? */
1874 		SCTP_INP_DECR_REF(inp);
1875 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1876 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1877 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1878 		so = SCTP_INP_SO(inp);
1879 		atomic_add_int(&stcb->asoc.refcnt, 1);
1880 		SCTP_TCB_UNLOCK(stcb);
1881 		SCTP_SOCKET_LOCK(so, 1);
1882 		SCTP_TCB_LOCK(stcb);
1883 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1884 #endif
1885 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1886 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1887 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1888 		SCTP_SOCKET_UNLOCK(so, 1);
1889 #endif
1890 		/*
1891 		 * free asoc always unlocks (or destroys) the tcb, so prevent
1892 		 * a duplicate unlock or an unlock of a freed mutex :-0
1893 		 */
1894 		stcb = NULL;
1895 		goto out_no_decr;
1896 	case SCTP_TIMER_TYPE_INPKILL:
1897 		SCTP_STAT_INCR(sctps_timoinpkill);
1898 		if (inp == NULL) {
1899 			break;
1900 		}
1901 		/*
1902 		 * special case, take away our increment since WE are the
1903 		 * killer
1904 		 */
1905 		SCTP_INP_DECR_REF(inp);
1906 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1907 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1908 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1909 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1910 		inp = NULL;
1911 		goto out_no_decr;
1912 	default:
1913 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1914 		    type);
1915 		break;
1916 	}
1917 #ifdef SCTP_AUDITING_ENABLED
1918 	sctp_audit_log(0xF1, (uint8_t) type);
1919 	if (inp)
1920 		sctp_auditing(5, inp, stcb, net);
1921 #endif
1922 	if ((did_output) && stcb) {
1923 		/*
1924 		 * Now we need to clean up the control chunk chain if an
1925 		 * ECNE is on it. It must be marked as UNSENT again so next
1926 		 * call will continue to send it until such time that we get
1927 		 * a CWR to remove it. It is, however, unlikely that we
1928 		 * will find an ECN echo on the chain.
1929 		 */
1930 		sctp_fix_ecn_echo(&stcb->asoc);
1931 	}
1932 get_out:
1933 	if (stcb) {
1934 		SCTP_TCB_UNLOCK(stcb);
1935 	}
1936 out_decr:
1937 	if (inp) {
1938 		SCTP_INP_DECR_REF(inp);
1939 	}
1940 out_no_decr:
1941 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
1942 	CURVNET_RESTORE();
1943 }
1944 
1945 void
1946 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1947     struct sctp_nets *net)
1948 {
1949 	uint32_t to_ticks;
1950 	struct sctp_timer *tmr;
1951 
1952 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1953 		return;
1954 
1955 	tmr = NULL;
1956 	if (stcb) {
1957 		SCTP_TCB_LOCK_ASSERT(stcb);
1958 	}
1959 	switch (t_type) {
1960 	case SCTP_TIMER_TYPE_ZERO_COPY:
1961 		tmr = &inp->sctp_ep.zero_copy_timer;
1962 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1963 		break;
1964 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1965 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1966 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1967 		break;
1968 	case SCTP_TIMER_TYPE_ADDR_WQ:
1969 		/* Only 1 tick away :-) */
1970 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1971 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1972 		break;
1973 	case SCTP_TIMER_TYPE_SEND:
1974 		/* Here we use the RTO timer */
1975 		{
1976 			int rto_val;
1977 
1978 			if ((stcb == NULL) || (net == NULL)) {
1979 				return;
1980 			}
1981 			tmr = &net->rxt_timer;
1982 			if (net->RTO == 0) {
1983 				rto_val = stcb->asoc.initial_rto;
1984 			} else {
1985 				rto_val = net->RTO;
1986 			}
1987 			to_ticks = MSEC_TO_TICKS(rto_val);
1988 		}
1989 		break;
1990 	case SCTP_TIMER_TYPE_INIT:
1991 		/*
1992 		 * Here we use the INIT timer default, usually about 1
1993 		 * minute.
1994 		 */
1995 		if ((stcb == NULL) || (net == NULL)) {
1996 			return;
1997 		}
1998 		tmr = &net->rxt_timer;
1999 		if (net->RTO == 0) {
2000 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2001 		} else {
2002 			to_ticks = MSEC_TO_TICKS(net->RTO);
2003 		}
2004 		break;
2005 	case SCTP_TIMER_TYPE_RECV:
2006 		/*
2007 		 * Here we use the Delayed-Ack timer value from the inp
2008 		 * usually about 200 ms.
2009 		 */
2010 		if (stcb == NULL) {
2011 			return;
2012 		}
2013 		tmr = &stcb->asoc.dack_timer;
2014 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2015 		break;
2016 	case SCTP_TIMER_TYPE_SHUTDOWN:
2017 		/* Here we use the RTO of the destination. */
2018 		if ((stcb == NULL) || (net == NULL)) {
2019 			return;
2020 		}
2021 		if (net->RTO == 0) {
2022 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2023 		} else {
2024 			to_ticks = MSEC_TO_TICKS(net->RTO);
2025 		}
2026 		tmr = &net->rxt_timer;
2027 		break;
2028 	case SCTP_TIMER_TYPE_HEARTBEAT:
2029 		/*
2030 		 * The net is used here so that we can add in the RTO, even
2031 		 * though we use a different timer. We also add the HB delay
2032 		 * plus a random jitter.
2033 		 */
2034 		if ((stcb == NULL) || (net == NULL)) {
2035 			return;
2036 		} else {
2037 			uint32_t rndval;
2038 			uint32_t jitter;
2039 
2040 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2041 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2042 				return;
2043 			}
2044 			if (net->RTO == 0) {
2045 				to_ticks = stcb->asoc.initial_rto;
2046 			} else {
2047 				to_ticks = net->RTO;
2048 			}
2049 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2050 			jitter = rndval % to_ticks;
2051 			if (jitter >= (to_ticks >> 1)) {
2052 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2053 			} else {
2054 				to_ticks = to_ticks - jitter;
2055 			}
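			/*
			 * A sketch of the net effect (derived from the code
			 * above, not an additional requirement): jitter is
			 * uniform in [0, RTO), and the adjustment spreads
			 * the delay roughly uniformly over [RTO/2, 3*RTO/2).
			 */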
2056 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2057 			    !(net->dest_state & SCTP_ADDR_PF)) {
2058 				to_ticks += net->heart_beat_delay;
2059 			}
2060 			/*
2061 			 * Now we must convert to_ticks, which is currently
2062 			 * in ms, to ticks.
2063 			 */
2064 			to_ticks = MSEC_TO_TICKS(to_ticks);
2065 			tmr = &net->hb_timer;
2066 		}
2067 		break;
2068 	case SCTP_TIMER_TYPE_COOKIE:
2069 		/*
2070 		 * Here we can use the RTO timer from the network since one
2071 		 * RTT was complete. If a retransmission happened then we will be
2072 		 * using the RTO initial value.
2073 		 */
2074 		if ((stcb == NULL) || (net == NULL)) {
2075 			return;
2076 		}
2077 		if (net->RTO == 0) {
2078 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2079 		} else {
2080 			to_ticks = MSEC_TO_TICKS(net->RTO);
2081 		}
2082 		tmr = &net->rxt_timer;
2083 		break;
2084 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2085 		/*
2086 		 * Nothing needed but the endpoint here; usually about 60
2087 		 * minutes.
2088 		 */
2089 		tmr = &inp->sctp_ep.signature_change;
2090 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2091 		break;
2092 	case SCTP_TIMER_TYPE_ASOCKILL:
2093 		if (stcb == NULL) {
2094 			return;
2095 		}
2096 		tmr = &stcb->asoc.strreset_timer;
2097 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2098 		break;
2099 	case SCTP_TIMER_TYPE_INPKILL:
2100 		/*
2101 		 * The inp is set up to die. We re-use the signature_change
2102 		 * timer since that has stopped and we are in the GONE
2103 		 * state.
2104 		 */
2105 		tmr = &inp->sctp_ep.signature_change;
2106 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2107 		break;
2108 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2109 		/*
2110 		 * Here we use the value found in the EP for PMTU, usually
2111 		 * about 10 minutes.
2112 		 */
2113 		if ((stcb == NULL) || (net == NULL)) {
2114 			return;
2115 		}
2116 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2117 			return;
2118 		}
2119 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2120 		tmr = &net->pmtu_timer;
2121 		break;
2122 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2123 		/* Here we use the RTO of the destination */
2124 		if ((stcb == NULL) || (net == NULL)) {
2125 			return;
2126 		}
2127 		if (net->RTO == 0) {
2128 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2129 		} else {
2130 			to_ticks = MSEC_TO_TICKS(net->RTO);
2131 		}
2132 		tmr = &net->rxt_timer;
2133 		break;
2134 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2135 		/*
2136 		 * Here we use the endpoint's shutdown guard timer, usually
2137 		 * about 3 minutes.
2138 		 */
2139 		if (stcb == NULL) {
2140 			return;
2141 		}
2142 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2143 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2144 		} else {
2145 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2146 		}
2147 		tmr = &stcb->asoc.shut_guard_timer;
2148 		break;
2149 	case SCTP_TIMER_TYPE_STRRESET:
2150 		/*
2151 		 * Here the timer comes from the stcb but its value is from
2152 		 * the net's RTO.
2153 		 */
2154 		if ((stcb == NULL) || (net == NULL)) {
2155 			return;
2156 		}
2157 		if (net->RTO == 0) {
2158 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2159 		} else {
2160 			to_ticks = MSEC_TO_TICKS(net->RTO);
2161 		}
2162 		tmr = &stcb->asoc.strreset_timer;
2163 		break;
2164 	case SCTP_TIMER_TYPE_ASCONF:
2165 		/*
2166 		 * Here the timer comes from the stcb but its value is from
2167 		 * the net's RTO.
2168 		 */
2169 		if ((stcb == NULL) || (net == NULL)) {
2170 			return;
2171 		}
2172 		if (net->RTO == 0) {
2173 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2174 		} else {
2175 			to_ticks = MSEC_TO_TICKS(net->RTO);
2176 		}
2177 		tmr = &stcb->asoc.asconf_timer;
2178 		break;
2179 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2180 		if ((stcb == NULL) || (net != NULL)) {
2181 			return;
2182 		}
2183 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2184 		tmr = &stcb->asoc.delete_prim_timer;
2185 		break;
2186 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2187 		if (stcb == NULL) {
2188 			return;
2189 		}
2190 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2191 			/*
2192 			 * Really an error since stcb is NOT set to
2193 			 * autoclose
2194 			 */
2195 			return;
2196 		}
2197 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2198 		tmr = &stcb->asoc.autoclose_timer;
2199 		break;
2200 	default:
2201 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2202 		    __func__, t_type);
2203 		return;
2204 		break;
2205 	}
2206 	if ((to_ticks <= 0) || (tmr == NULL)) {
2207 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2208 		    __func__, t_type, to_ticks, (void *)tmr);
2209 		return;
2210 	}
2211 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2212 		/*
2213 		 * We do NOT allow the timer to be started if it is already
2214 		 * running; in that case we leave the current one up unchanged.
2215 		 */
2216 		return;
2217 	}
2218 	/* At this point we can proceed */
2219 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2220 		stcb->asoc.num_send_timers_up++;
2221 	}
2222 	tmr->stopped_from = 0;
2223 	tmr->type = t_type;
2224 	tmr->ep = (void *)inp;
2225 	tmr->tcb = (void *)stcb;
2226 	tmr->net = (void *)net;
2227 	tmr->self = (void *)tmr;
2228 	tmr->vnet = (void *)curvnet;
2229 	tmr->ticks = sctp_get_tick_count();
2230 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2231 	return;
2232 }
2233 
2234 void
2235 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2236     struct sctp_nets *net, uint32_t from)
2237 {
2238 	struct sctp_timer *tmr;
2239 
2240 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2241 	    (inp == NULL))
2242 		return;
2243 
2244 	tmr = NULL;
2245 	if (stcb) {
2246 		SCTP_TCB_LOCK_ASSERT(stcb);
2247 	}
2248 	switch (t_type) {
2249 	case SCTP_TIMER_TYPE_ZERO_COPY:
2250 		tmr = &inp->sctp_ep.zero_copy_timer;
2251 		break;
2252 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2253 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2254 		break;
2255 	case SCTP_TIMER_TYPE_ADDR_WQ:
2256 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2257 		break;
2258 	case SCTP_TIMER_TYPE_SEND:
2259 		if ((stcb == NULL) || (net == NULL)) {
2260 			return;
2261 		}
2262 		tmr = &net->rxt_timer;
2263 		break;
2264 	case SCTP_TIMER_TYPE_INIT:
2265 		if ((stcb == NULL) || (net == NULL)) {
2266 			return;
2267 		}
2268 		tmr = &net->rxt_timer;
2269 		break;
2270 	case SCTP_TIMER_TYPE_RECV:
2271 		if (stcb == NULL) {
2272 			return;
2273 		}
2274 		tmr = &stcb->asoc.dack_timer;
2275 		break;
2276 	case SCTP_TIMER_TYPE_SHUTDOWN:
2277 		if ((stcb == NULL) || (net == NULL)) {
2278 			return;
2279 		}
2280 		tmr = &net->rxt_timer;
2281 		break;
2282 	case SCTP_TIMER_TYPE_HEARTBEAT:
2283 		if ((stcb == NULL) || (net == NULL)) {
2284 			return;
2285 		}
2286 		tmr = &net->hb_timer;
2287 		break;
2288 	case SCTP_TIMER_TYPE_COOKIE:
2289 		if ((stcb == NULL) || (net == NULL)) {
2290 			return;
2291 		}
2292 		tmr = &net->rxt_timer;
2293 		break;
2294 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2295 		/* nothing needed but the endpoint here */
2296 		tmr = &inp->sctp_ep.signature_change;
2297 		/*
2298 		 * We re-use the newcookie timer for the INP kill timer. We
2299 		 * must assure that we do not kill it by accident.
2300 		 */
2301 		break;
2302 	case SCTP_TIMER_TYPE_ASOCKILL:
2303 		/*
2304 		 * Stop the asoc kill timer.
2305 		 */
2306 		if (stcb == NULL) {
2307 			return;
2308 		}
2309 		tmr = &stcb->asoc.strreset_timer;
2310 		break;
2311 
2312 	case SCTP_TIMER_TYPE_INPKILL:
2313 		/*
2314 		 * The inp is set up to die. We re-use the signature_change
2315 		 * timer since that has stopped and we are in the GONE
2316 		 * state.
2317 		 */
2318 		tmr = &inp->sctp_ep.signature_change;
2319 		break;
2320 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2321 		if ((stcb == NULL) || (net == NULL)) {
2322 			return;
2323 		}
2324 		tmr = &net->pmtu_timer;
2325 		break;
2326 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2327 		if ((stcb == NULL) || (net == NULL)) {
2328 			return;
2329 		}
2330 		tmr = &net->rxt_timer;
2331 		break;
2332 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2333 		if (stcb == NULL) {
2334 			return;
2335 		}
2336 		tmr = &stcb->asoc.shut_guard_timer;
2337 		break;
2338 	case SCTP_TIMER_TYPE_STRRESET:
2339 		if (stcb == NULL) {
2340 			return;
2341 		}
2342 		tmr = &stcb->asoc.strreset_timer;
2343 		break;
2344 	case SCTP_TIMER_TYPE_ASCONF:
2345 		if (stcb == NULL) {
2346 			return;
2347 		}
2348 		tmr = &stcb->asoc.asconf_timer;
2349 		break;
2350 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2351 		if (stcb == NULL) {
2352 			return;
2353 		}
2354 		tmr = &stcb->asoc.delete_prim_timer;
2355 		break;
2356 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2357 		if (stcb == NULL) {
2358 			return;
2359 		}
2360 		tmr = &stcb->asoc.autoclose_timer;
2361 		break;
2362 	default:
2363 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2364 		    __func__, t_type);
2365 		break;
2366 	}
2367 	if (tmr == NULL) {
2368 		return;
2369 	}
2370 	if ((tmr->type != t_type) && tmr->type) {
2371 		/*
2372 	 * OK, we have a timer that is under joint use, for example the
2373 	 * cookie timer sharing state with the SEND timer. We are therefore
2374 	 * NOT running the timer that the caller wants stopped, so just
2375 	 * return.
2376 		 */
2377 		return;
2378 	}
2379 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2380 		stcb->asoc.num_send_timers_up--;
2381 		if (stcb->asoc.num_send_timers_up < 0) {
2382 			stcb->asoc.num_send_timers_up = 0;
2383 		}
2384 	}
2385 	tmr->self = NULL;
2386 	tmr->stopped_from = from;
2387 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2388 	return;
2389 }
2390 
2391 uint32_t
2392 sctp_calculate_len(struct mbuf *m)
2393 {
2394 	uint32_t tlen = 0;
2395 	struct mbuf *at;
2396 
2397 	at = m;
2398 	while (at) {
2399 		tlen += SCTP_BUF_LEN(at);
2400 		at = SCTP_BUF_NEXT(at);
2401 	}
2402 	return (tlen);
2403 }
2404 
2405 void
2406 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2407     struct sctp_association *asoc, uint32_t mtu)
2408 {
2409 	/*
2410 	 * Reset the P-MTU size on this association. This involves changing
2411 	 * the asoc MTU and going through ANY chunk+overhead larger than mtu
2412 	 * to allow the DF flag to be cleared.
2413 	 */
2414 	struct sctp_tmit_chunk *chk;
2415 	unsigned int eff_mtu, ovh;
2416 
2417 	asoc->smallest_mtu = mtu;
2418 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2419 		ovh = SCTP_MIN_OVERHEAD;
2420 	} else {
2421 		ovh = SCTP_MIN_V4_OVERHEAD;
2422 	}
2423 	eff_mtu = mtu - ovh;
2424 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2425 		if (chk->send_size > eff_mtu) {
2426 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2427 		}
2428 	}
2429 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2430 		if (chk->send_size > eff_mtu) {
2431 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2432 		}
2433 	}
2434 }
2435 
2436 
2437 /*
2438  * Given an association and the starting time of the current RTT period,
2439  * return the RTO in msecs. net should point to the current network.
2440  */
2441 
2442 uint32_t
2443 sctp_calculate_rto(struct sctp_tcb *stcb,
2444     struct sctp_association *asoc,
2445     struct sctp_nets *net,
2446     struct timeval *told,
2447     int safe, int rtt_from_sack)
2448 {
2449 	/*-
2450 	 * given an association and the starting time of the current RTT
2451 	 * period (in value1/value2) return RTO in number of msecs.
2452 	 */
2453 	int32_t rtt;		/* RTT in ms */
2454 	uint32_t new_rto;
2455 	int first_measure = 0;
2456 	struct timeval now, then, *old;
2457 
2458 	/* Copy it out for sparc64 */
2459 	if (safe == sctp_align_unsafe_makecopy) {
2460 		old = &then;
2461 		memcpy(&then, told, sizeof(struct timeval));
2462 	} else if (safe == sctp_align_safe_nocopy) {
2463 		old = told;
2464 	} else {
2465 		/* error */
2466 		SCTP_PRINTF("Huh, bad rto calc call\n");
2467 		return (0);
2468 	}
2469 	/************************/
2470 	/* 1. calculate new RTT */
2471 	/************************/
2472 	/* get the current time */
2473 	if (stcb->asoc.use_precise_time) {
2474 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2475 	} else {
2476 		(void)SCTP_GETTIME_TIMEVAL(&now);
2477 	}
2478 	timevalsub(&now, old);
2479 	/* store the current RTT in us */
2480 	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
2481 	        (uint64_t) now.tv_usec;
2482 
2483 	/* compute rtt in ms */
2484 	rtt = (int32_t) (net->rtt / 1000);
2485 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2486 		/* Tell the CC module that a new update has just occurred
2487 		 * from a sack */
2488 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2489 	}
2490 	/*
2491 	 * Do we need to determine the LAN type? We do this only on SACKs,
2492 	 * i.e. RTT determined from data, not non-data (HB/INIT->INITACK).
2493 	 */
2494 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2495 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2496 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2497 			net->lan_type = SCTP_LAN_INTERNET;
2498 		} else {
2499 			net->lan_type = SCTP_LAN_LOCAL;
2500 		}
2501 	}
2502 	/***************************/
2503 	/* 2. update RTTVAR & SRTT */
2504 	/***************************/
2505 	/*-
2506 	 * Compute the scaled average lastsa and the
2507 	 * scaled variance lastsv as described in van Jacobson
2508 	 * Paper "Congestion Avoidance and Control", Annex A.
2509 	 *
2510 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2511 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2512 	 */
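	/*
	 * In unscaled terms (a sketch, assuming the usual shift values
	 * SCTP_RTT_SHIFT == 3 and SCTP_RTT_VAR_SHIFT == 2), the update below
	 * is the classic:
	 *
	 *	delta   = rtt - srtt;
	 *	srtt   += delta / 8;
	 *	rttvar += (|delta| - rttvar) / 4;
	 *	RTO     = srtt + 4 * rttvar;
	 */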
2513 	if (net->RTO_measured) {
2514 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2515 		net->lastsa += rtt;
2516 		if (rtt < 0) {
2517 			rtt = -rtt;
2518 		}
2519 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2520 		net->lastsv += rtt;
2521 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2522 			rto_logging(net, SCTP_LOG_RTTVAR);
2523 		}
2524 	} else {
2525 		/* First RTO measurement */
2526 		net->RTO_measured = 1;
2527 		first_measure = 1;
2528 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2529 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2530 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2531 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2532 		}
2533 	}
2534 	if (net->lastsv == 0) {
2535 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2536 	}
2537 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
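	/*
	 * Descriptive note: an RTO above SCTP_SAT_NETWORK_MIN marks the path
	 * as a satellite network. Once a later (non-first) measurement falls
	 * back below that threshold while sat_network is set, the flag is
	 * cleared and locked out so the association does not flap between
	 * the two modes.
	 */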
2538 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2539 	    (stcb->asoc.sat_network_lockout == 0)) {
2540 		stcb->asoc.sat_network = 1;
2541 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2542 		stcb->asoc.sat_network = 0;
2543 		stcb->asoc.sat_network_lockout = 1;
2544 	}
2545 	/* bound it, per C6/C7 in Section 5.3.1 */
2546 	if (new_rto < stcb->asoc.minrto) {
2547 		new_rto = stcb->asoc.minrto;
2548 	}
2549 	if (new_rto > stcb->asoc.maxrto) {
2550 		new_rto = stcb->asoc.maxrto;
2551 	}
2552 	/* we are now returning the RTO */
2553 	return (new_rto);
2554 }
2555 
2556 /*
2557  * return a pointer to a contiguous piece of data from the given mbuf chain
2558  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2559  * one mbuf, a copy is made at 'ptr'. The caller must ensure that the buffer
2560  * size is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
2561  */
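/*
 * A hypothetical usage sketch (not taken from this file): pulling a chunk
 * header out of a possibly fragmented chain into a stack buffer:
 *
 *	struct sctp_chunkhdr buf, *ch;
 *
 *	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
 *	    sizeof(struct sctp_chunkhdr), (uint8_t *)&buf);
 *	if (ch == NULL)
 *		return;		// not enough data in the chain
 */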
2562 caddr_t
2563 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2564 {
2565 	uint32_t count;
2566 	uint8_t *ptr;
2567 
2568 	ptr = in_ptr;
2569 	if ((off < 0) || (len <= 0))
2570 		return (NULL);
2571 
2572 	/* find the desired start location */
2573 	while ((m != NULL) && (off > 0)) {
2574 		if (off < SCTP_BUF_LEN(m))
2575 			break;
2576 		off -= SCTP_BUF_LEN(m);
2577 		m = SCTP_BUF_NEXT(m);
2578 	}
2579 	if (m == NULL)
2580 		return (NULL);
2581 
2582 	/* is the current mbuf large enough (eg. contiguous)? */
2583 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2584 		return (mtod(m, caddr_t)+off);
2585 	} else {
2586 		/* else, it spans more than one mbuf, so save a temp copy... */
2587 		while ((m != NULL) && (len > 0)) {
2588 			count = min(SCTP_BUF_LEN(m) - off, len);
2589 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2590 			len -= count;
2591 			ptr += count;
2592 			off = 0;
2593 			m = SCTP_BUF_NEXT(m);
2594 		}
2595 		if ((m == NULL) && (len > 0))
2596 			return (NULL);
2597 		else
2598 			return ((caddr_t)in_ptr);
2599 	}
2600 }
2601 
2602 
2603 
2604 struct sctp_paramhdr *
2605 sctp_get_next_param(struct mbuf *m,
2606     int offset,
2607     struct sctp_paramhdr *pull,
2608     int pull_limit)
2609 {
2610 	/* This just provides a typed signature to Peter's Pull routine */
2611 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2612 	    (uint8_t *) pull));
2613 }
2614 
2615 
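/*
 * Append 'padlen' (at most 3) bytes of zero padding to the given mbuf,
 * growing the chain with a fresh mbuf when the trailing space is too small.
 * Returns the mbuf that holds the padding, or NULL on failure.
 */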
2616 struct mbuf *
2617 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2618 {
2619 	struct mbuf *m_last;
2620 	caddr_t dp;
2621 
2622 	if (padlen > 3) {
2623 		return (NULL);
2624 	}
2625 	if (padlen <= M_TRAILINGSPACE(m)) {
2626 		/*
2627 		 * The easy way. We hope the majority of the time we hit
2628 		 * here :)
2629 		 */
2630 		m_last = m;
2631 	} else {
2632 		/* Hard way we must grow the mbuf chain */
2633 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2634 		if (m_last == NULL) {
2635 			return (NULL);
2636 		}
2637 		SCTP_BUF_LEN(m_last) = 0;
2638 		SCTP_BUF_NEXT(m_last) = NULL;
2639 		SCTP_BUF_NEXT(m) = m_last;
2640 	}
2641 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2642 	SCTP_BUF_LEN(m_last) += padlen;
2643 	memset(dp, 0, padlen);
2644 	return (m_last);
2645 }
2646 
2647 struct mbuf *
2648 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2649 {
2650 	/* find the last mbuf in chain and pad it */
2651 	struct mbuf *m_at;
2652 
2653 	if (last_mbuf != NULL) {
2654 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2655 	} else {
2656 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2657 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2658 				return (sctp_add_pad_tombuf(m_at, padval));
2659 			}
2660 		}
2661 	}
2662 	return (NULL);
2663 }
2664 
2665 static void
2666 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2667     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2668 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2669     SCTP_UNUSED
2670 #endif
2671 )
2672 {
2673 	struct mbuf *m_notify;
2674 	struct sctp_assoc_change *sac;
2675 	struct sctp_queued_to_read *control;
2676 	unsigned int notif_len;
2677 	uint16_t abort_len;
2678 	unsigned int i;
2679 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2680 	struct socket *so;
2681 #endif
2682 
2683 	if (stcb == NULL) {
2684 		return;
2685 	}
2686 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2687 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2688 		if (abort != NULL) {
2689 			abort_len = ntohs(abort->ch.chunk_length);
2690 		} else {
2691 			abort_len = 0;
2692 		}
2693 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2694 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2695 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2696 			notif_len += abort_len;
2697 		}
2698 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2699 		if (m_notify == NULL) {
2700 			/* Retry with smaller value. */
2701 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2702 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2703 			if (m_notify == NULL) {
2704 				goto set_error;
2705 			}
2706 		}
2707 		SCTP_BUF_NEXT(m_notify) = NULL;
2708 		sac = mtod(m_notify, struct sctp_assoc_change *);
2709 		memset(sac, 0, notif_len);
2710 		sac->sac_type = SCTP_ASSOC_CHANGE;
2711 		sac->sac_flags = 0;
2712 		sac->sac_length = sizeof(struct sctp_assoc_change);
2713 		sac->sac_state = state;
2714 		sac->sac_error = error;
2715 		/* XXX verify these stream counts */
2716 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2717 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2718 		sac->sac_assoc_id = sctp_get_associd(stcb);
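		/*
		 * The optional sac_info bytes carry one feature code per
		 * supported extension for COMM_UP/RESTART, or the received
		 * ABORT chunk for COMM_LOST/CANT_STR_ASSOC.
		 */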
2719 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2720 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2721 				i = 0;
2722 				if (stcb->asoc.prsctp_supported == 1) {
2723 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2724 				}
2725 				if (stcb->asoc.auth_supported == 1) {
2726 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2727 				}
2728 				if (stcb->asoc.asconf_supported == 1) {
2729 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2730 				}
2731 				if (stcb->asoc.idata_supported == 1) {
2732 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2733 				}
2734 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2735 				if (stcb->asoc.reconfig_supported == 1) {
2736 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2737 				}
2738 				sac->sac_length += i;
2739 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2740 				memcpy(sac->sac_info, abort, abort_len);
2741 				sac->sac_length += abort_len;
2742 			}
2743 		}
2744 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2745 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2746 		    0, 0, stcb->asoc.context, 0, 0, 0,
2747 		    m_notify);
2748 		if (control != NULL) {
2749 			control->length = SCTP_BUF_LEN(m_notify);
2750 			/* not that we need this */
2751 			control->tail_mbuf = m_notify;
2752 			control->spec_flags = M_NOTIFICATION;
2753 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2754 			    control,
2755 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2756 			    so_locked);
2757 		} else {
2758 			sctp_m_freem(m_notify);
2759 		}
2760 	}
2761 	/*
2762 	 * For 1-to-1 style sockets, we send up an error when an ABORT
2763 	 * comes in.
2764 	 */
2765 set_error:
2766 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2767 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2768 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2769 		SOCK_LOCK(stcb->sctp_socket);
2770 		if (from_peer) {
2771 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2772 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2773 				stcb->sctp_socket->so_error = ECONNREFUSED;
2774 			} else {
2775 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2776 				stcb->sctp_socket->so_error = ECONNRESET;
2777 			}
2778 		} else {
2779 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2780 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2781 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2782 				stcb->sctp_socket->so_error = ETIMEDOUT;
2783 			} else {
2784 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2785 				stcb->sctp_socket->so_error = ECONNABORTED;
2786 			}
2787 		}
2788 	}
2789 	/* Wake ANY sleepers */
2790 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2791 	so = SCTP_INP_SO(stcb->sctp_ep);
2792 	if (!so_locked) {
2793 		atomic_add_int(&stcb->asoc.refcnt, 1);
2794 		SCTP_TCB_UNLOCK(stcb);
2795 		SCTP_SOCKET_LOCK(so, 1);
2796 		SCTP_TCB_LOCK(stcb);
2797 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2798 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2799 			SCTP_SOCKET_UNLOCK(so, 1);
2800 			return;
2801 		}
2802 	}
2803 #endif
2804 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2805 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2806 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2807 		socantrcvmore_locked(stcb->sctp_socket);
2808 	}
2809 	sorwakeup(stcb->sctp_socket);
2810 	sowwakeup(stcb->sctp_socket);
2811 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2812 	if (!so_locked) {
2813 		SCTP_SOCKET_UNLOCK(so, 1);
2814 	}
2815 #endif
2816 }
2817 
2818 static void
2819 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2820     struct sockaddr *sa, uint32_t error, int so_locked
2821 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2822     SCTP_UNUSED
2823 #endif
2824 )
2825 {
2826 	struct mbuf *m_notify;
2827 	struct sctp_paddr_change *spc;
2828 	struct sctp_queued_to_read *control;
2829 
2830 	if ((stcb == NULL) ||
2831 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2832 		/* event not enabled */
2833 		return;
2834 	}
2835 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2836 	if (m_notify == NULL)
2837 		return;
2838 	SCTP_BUF_LEN(m_notify) = 0;
2839 	spc = mtod(m_notify, struct sctp_paddr_change *);
2840 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2841 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2842 	spc->spc_flags = 0;
2843 	spc->spc_length = sizeof(struct sctp_paddr_change);
2844 	switch (sa->sa_family) {
2845 #ifdef INET
2846 	case AF_INET:
2847 #ifdef INET6
2848 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2849 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2850 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2851 		} else {
2852 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2853 		}
2854 #else
2855 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2856 #endif
2857 		break;
2858 #endif
2859 #ifdef INET6
2860 	case AF_INET6:
2861 		{
2862 			struct sockaddr_in6 *sin6;
2863 
2864 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2865 
2866 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2867 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2868 				if (sin6->sin6_scope_id == 0) {
2869 					/* recover scope_id for user */
2870 					(void)sa6_recoverscope(sin6);
2871 				} else {
2872 					/* clear embedded scope_id for user */
2873 					in6_clearscope(&sin6->sin6_addr);
2874 				}
2875 			}
2876 			break;
2877 		}
2878 #endif
2879 	default:
2880 		/* TSNH */
2881 		break;
2882 	}
2883 	spc->spc_state = state;
2884 	spc->spc_error = error;
2885 	spc->spc_assoc_id = sctp_get_associd(stcb);
2886 
2887 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2888 	SCTP_BUF_NEXT(m_notify) = NULL;
2889 
2890 	/* append to socket */
2891 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2892 	    0, 0, stcb->asoc.context, 0, 0, 0,
2893 	    m_notify);
2894 	if (control == NULL) {
2895 		/* no memory */
2896 		sctp_m_freem(m_notify);
2897 		return;
2898 	}
2899 	control->length = SCTP_BUF_LEN(m_notify);
2900 	control->spec_flags = M_NOTIFICATION;
2901 	/* not that we need this */
2902 	control->tail_mbuf = m_notify;
2903 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2904 	    control,
2905 	    &stcb->sctp_socket->so_rcv, 1,
2906 	    SCTP_READ_LOCK_NOT_HELD,
2907 	    so_locked);
2908 }
2909 
2910 
2911 static void
2912 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2913     struct sctp_tmit_chunk *chk, int so_locked
2914 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2915     SCTP_UNUSED
2916 #endif
2917 )
2918 {
2919 	struct mbuf *m_notify;
2920 	struct sctp_send_failed *ssf;
2921 	struct sctp_send_failed_event *ssfe;
2922 	struct sctp_queued_to_read *control;
2923 	struct sctp_chunkhdr *chkhdr;
2924 	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
2925 
2926 	if ((stcb == NULL) ||
2927 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2928 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2929 		/* event not enabled */
2930 		return;
2931 	}
2932 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2933 		notifhdr_len = sizeof(struct sctp_send_failed_event);
2934 	} else {
2935 		notifhdr_len = sizeof(struct sctp_send_failed);
2936 	}
2937 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
2938 	if (m_notify == NULL)
2939 		/* no space left */
2940 		return;
2941 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
2942 	if (stcb->asoc.idata_supported) {
2943 		chkhdr_len = sizeof(struct sctp_idata_chunk);
2944 	} else {
2945 		chkhdr_len = sizeof(struct sctp_data_chunk);
2946 	}
2947 	/* Use some defaults in case we can't access the chunk header */
2948 	if (chk->send_size >= chkhdr_len) {
2949 		payload_len = chk->send_size - chkhdr_len;
2950 	} else {
2951 		payload_len = 0;
2952 	}
2953 	padding_len = 0;
2954 	if (chk->data != NULL) {
2955 		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
2956 		if (chkhdr != NULL) {
2957 			chk_len = ntohs(chkhdr->chunk_length);
2958 			if ((chk_len >= chkhdr_len) &&
2959 			    (chk->send_size >= chk_len) &&
2960 			    (chk->send_size - chk_len < 4)) {
2961 				padding_len = chk->send_size - chk_len;
2962 				payload_len = chk->send_size - chkhdr_len - padding_len;
2963 			}
2964 		}
2965 	}
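	/*
	 * payload_len now reflects the user data carried by the chunk and
	 * padding_len any trailing chunk padding; both remain best-effort
	 * defaults when the chunk header could not be parsed.
	 */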
2966 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2967 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2968 		memset(ssfe, 0, notifhdr_len);
2969 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2970 		if (sent) {
2971 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2972 		} else {
2973 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2974 		}
2975 		ssfe->ssfe_length = (uint32_t) (notifhdr_len + payload_len);
2976 		ssfe->ssfe_error = error;
2977 		/* not exactly what the user sent in, but should be close :) */
2978 		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
2979 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2980 		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
2981 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2982 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2983 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2984 	} else {
2985 		ssf = mtod(m_notify, struct sctp_send_failed *);
2986 		memset(ssf, 0, notifhdr_len);
2987 		ssf->ssf_type = SCTP_SEND_FAILED;
2988 		if (sent) {
2989 			ssf->ssf_flags = SCTP_DATA_SENT;
2990 		} else {
2991 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2992 		}
2993 		ssf->ssf_length = (uint32_t) (notifhdr_len + payload_len);
2994 		ssf->ssf_error = error;
2995 		/* not exactly what the user sent in, but should be close :) */
2996 		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
2997 		ssf->ssf_info.sinfo_ssn = (uint16_t) chk->rec.data.mid;
2998 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2999 		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
3000 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
3001 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3002 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3003 	}
3004 	if (chk->data != NULL) {
3005 		/* Trim off the sctp chunk header (it should be there) */
3006 		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
3007 			m_adj(chk->data, chkhdr_len);
3008 			m_adj(chk->data, -padding_len);
3009 			sctp_mbuf_crush(chk->data);
3010 			chk->send_size -= (chkhdr_len + padding_len);
3011 		}
3012 	}
3013 	SCTP_BUF_NEXT(m_notify) = chk->data;
3014 	/* Steal off the mbuf */
3015 	chk->data = NULL;
3016 	/*
3017 	 * For this case, we check the actual socket buffer: since the assoc
3018 	 * is going away, we don't want to overfill the socket buffer for a
3019 	 * non-reader.
3020 	 */
3021 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3022 		sctp_m_freem(m_notify);
3023 		return;
3024 	}
3025 	/* append to socket */
3026 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3027 	    0, 0, stcb->asoc.context, 0, 0, 0,
3028 	    m_notify);
3029 	if (control == NULL) {
3030 		/* no memory */
3031 		sctp_m_freem(m_notify);
3032 		return;
3033 	}
3034 	control->spec_flags = M_NOTIFICATION;
3035 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3036 	    control,
3037 	    &stcb->sctp_socket->so_rcv, 1,
3038 	    SCTP_READ_LOCK_NOT_HELD,
3039 	    so_locked);
3040 }
3041 
3042 
3043 static void
3044 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3045     struct sctp_stream_queue_pending *sp, int so_locked
3046 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3047     SCTP_UNUSED
3048 #endif
3049 )
3050 {
3051 	struct mbuf *m_notify;
3052 	struct sctp_send_failed *ssf;
3053 	struct sctp_send_failed_event *ssfe;
3054 	struct sctp_queued_to_read *control;
3055 	int notifhdr_len;
3056 
3057 	if ((stcb == NULL) ||
3058 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3059 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3060 		/* event not enabled */
3061 		return;
3062 	}
3063 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3064 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3065 	} else {
3066 		notifhdr_len = sizeof(struct sctp_send_failed);
3067 	}
3068 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3069 	if (m_notify == NULL) {
3070 		/* no space left */
3071 		return;
3072 	}
3073 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3074 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3075 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3076 		memset(ssfe, 0, notifhdr_len);
3077 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3078 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3079 		ssfe->ssfe_length = (uint32_t) (notifhdr_len + sp->length);
3080 		ssfe->ssfe_error = error;
3081 		/* not exactly what the user sent in, but should be close :) */
3082 		ssfe->ssfe_info.snd_sid = sp->sid;
3083 		if (sp->some_taken) {
3084 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3085 		} else {
3086 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3087 		}
3088 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3089 		ssfe->ssfe_info.snd_context = sp->context;
3090 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3091 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3092 	} else {
3093 		ssf = mtod(m_notify, struct sctp_send_failed *);
3094 		memset(ssf, 0, notifhdr_len);
3095 		ssf->ssf_type = SCTP_SEND_FAILED;
3096 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3097 		ssf->ssf_length = (uint32_t) (notifhdr_len + sp->length);
3098 		ssf->ssf_error = error;
3099 		/* not exactly what the user sent in, but should be close :) */
3100 		ssf->ssf_info.sinfo_stream = sp->sid;
3101 		ssf->ssf_info.sinfo_ssn = 0;
3102 		if (sp->some_taken) {
3103 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3104 		} else {
3105 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3106 		}
3107 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3108 		ssf->ssf_info.sinfo_context = sp->context;
3109 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3110 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3111 	}
3112 	SCTP_BUF_NEXT(m_notify) = sp->data;
3113 
3114 	/* Steal off the mbuf */
3115 	sp->data = NULL;
3116 	/*
3117 	 * For this case, we check the actual socket buffer: since the assoc
3118 	 * is going away, we don't want to overfill the socket buffer for a
3119 	 * non-reader.
3120 	 */
3121 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3122 		sctp_m_freem(m_notify);
3123 		return;
3124 	}
3125 	/* append to socket */
3126 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3127 	    0, 0, stcb->asoc.context, 0, 0, 0,
3128 	    m_notify);
3129 	if (control == NULL) {
3130 		/* no memory */
3131 		sctp_m_freem(m_notify);
3132 		return;
3133 	}
3134 	control->spec_flags = M_NOTIFICATION;
3135 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3136 	    control,
3137 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3138 }
3139 
3140 
3141 
3142 static void
3143 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3144 {
3145 	struct mbuf *m_notify;
3146 	struct sctp_adaptation_event *sai;
3147 	struct sctp_queued_to_read *control;
3148 
3149 	if ((stcb == NULL) ||
3150 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3151 		/* event not enabled */
3152 		return;
3153 	}
3154 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_NOWAIT, 1, MT_DATA);
3155 	if (m_notify == NULL)
3156 		/* no space left */
3157 		return;
3158 	SCTP_BUF_LEN(m_notify) = 0;
3159 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3160 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3161 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3162 	sai->sai_flags = 0;
3163 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3164 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3165 	sai->sai_assoc_id = sctp_get_associd(stcb);
3166 
3167 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3168 	SCTP_BUF_NEXT(m_notify) = NULL;
3169 
3170 	/* append to socket */
3171 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3172 	    0, 0, stcb->asoc.context, 0, 0, 0,
3173 	    m_notify);
3174 	if (control == NULL) {
3175 		/* no memory */
3176 		sctp_m_freem(m_notify);
3177 		return;
3178 	}
3179 	control->length = SCTP_BUF_LEN(m_notify);
3180 	control->spec_flags = M_NOTIFICATION;
3181 	/* not that we need this */
3182 	control->tail_mbuf = m_notify;
3183 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3184 	    control,
3185 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3186 }
3187 
3188 /* This always must be called with the read-queue LOCKED in the INP */
3189 static void
3190 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3191     uint32_t val, int so_locked
3192 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3193     SCTP_UNUSED
3194 #endif
3195 )
3196 {
3197 	struct mbuf *m_notify;
3198 	struct sctp_pdapi_event *pdapi;
3199 	struct sctp_queued_to_read *control;
3200 	struct sockbuf *sb;
3201 
3202 	if ((stcb == NULL) ||
3203 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3204 		/* event not enabled */
3205 		return;
3206 	}
3207 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3208 		return;
3209 	}
3210 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3211 	if (m_notify == NULL)
3212 		/* no space left */
3213 		return;
3214 	SCTP_BUF_LEN(m_notify) = 0;
3215 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3216 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3217 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3218 	pdapi->pdapi_flags = 0;
3219 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3220 	pdapi->pdapi_indication = error;
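	/*
	 * val packs the stream id in the upper 16 bits and the
	 * sequence number in the lower 16 bits.
	 */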
3221 	pdapi->pdapi_stream = (val >> 16);
3222 	pdapi->pdapi_seq = (val & 0x0000ffff);
3223 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3224 
3225 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3226 	SCTP_BUF_NEXT(m_notify) = NULL;
3227 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3228 	    0, 0, stcb->asoc.context, 0, 0, 0,
3229 	    m_notify);
3230 	if (control == NULL) {
3231 		/* no memory */
3232 		sctp_m_freem(m_notify);
3233 		return;
3234 	}
3235 	control->spec_flags = M_NOTIFICATION;
3236 	control->length = SCTP_BUF_LEN(m_notify);
3237 	/* not that we need this */
3238 	control->tail_mbuf = m_notify;
3239 	control->held_length = 0;
3240 	control->length = 0;
3241 	sb = &stcb->sctp_socket->so_rcv;
3242 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3243 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3244 	}
3245 	sctp_sballoc(stcb, sb, m_notify);
3246 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3247 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3248 	}
3249 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3250 	control->end_added = 1;
3251 	if (stcb->asoc.control_pdapi)
3252 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3253 	else {
3254 		/* we really should not see this case */
3255 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3256 	}
3257 	if (stcb->sctp_ep && stcb->sctp_socket) {
3258 		/* This should always be the case */
3259 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3260 		struct socket *so;
3261 
3262 		so = SCTP_INP_SO(stcb->sctp_ep);
3263 		if (!so_locked) {
3264 			atomic_add_int(&stcb->asoc.refcnt, 1);
3265 			SCTP_TCB_UNLOCK(stcb);
3266 			SCTP_SOCKET_LOCK(so, 1);
3267 			SCTP_TCB_LOCK(stcb);
3268 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3269 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3270 				SCTP_SOCKET_UNLOCK(so, 1);
3271 				return;
3272 			}
3273 		}
3274 #endif
3275 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3276 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3277 		if (!so_locked) {
3278 			SCTP_SOCKET_UNLOCK(so, 1);
3279 		}
3280 #endif
3281 	}
3282 }
3283 
3284 static void
3285 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3286 {
3287 	struct mbuf *m_notify;
3288 	struct sctp_shutdown_event *sse;
3289 	struct sctp_queued_to_read *control;
3290 
3291 	/*
3292 	 * For TCP model AND UDP connected sockets we will send an error up
3293 	 * when an SHUTDOWN completes
3294 	 */
3295 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3296 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3297 		/* mark socket closed for read/write and wakeup! */
3298 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3299 		struct socket *so;
3300 
3301 		so = SCTP_INP_SO(stcb->sctp_ep);
3302 		atomic_add_int(&stcb->asoc.refcnt, 1);
3303 		SCTP_TCB_UNLOCK(stcb);
3304 		SCTP_SOCKET_LOCK(so, 1);
3305 		SCTP_TCB_LOCK(stcb);
3306 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3307 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3308 			SCTP_SOCKET_UNLOCK(so, 1);
3309 			return;
3310 		}
3311 #endif
3312 		socantsendmore(stcb->sctp_socket);
3313 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3314 		SCTP_SOCKET_UNLOCK(so, 1);
3315 #endif
3316 	}
3317 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3318 		/* event not enabled */
3319 		return;
3320 	}
3321 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3322 	if (m_notify == NULL)
3323 		/* no space left */
3324 		return;
3325 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3326 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3327 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3328 	sse->sse_flags = 0;
3329 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3330 	sse->sse_assoc_id = sctp_get_associd(stcb);
3331 
3332 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3333 	SCTP_BUF_NEXT(m_notify) = NULL;
3334 
3335 	/* append to socket */
3336 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3337 	    0, 0, stcb->asoc.context, 0, 0, 0,
3338 	    m_notify);
3339 	if (control == NULL) {
3340 		/* no memory */
3341 		sctp_m_freem(m_notify);
3342 		return;
3343 	}
3344 	control->spec_flags = M_NOTIFICATION;
3345 	control->length = SCTP_BUF_LEN(m_notify);
3346 	/* not that we need this */
3347 	control->tail_mbuf = m_notify;
3348 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3349 	    control,
3350 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3351 }
3352 
3353 static void
3354 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3355     int so_locked
3356 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3357     SCTP_UNUSED
3358 #endif
3359 )
3360 {
3361 	struct mbuf *m_notify;
3362 	struct sctp_sender_dry_event *event;
3363 	struct sctp_queued_to_read *control;
3364 
3365 	if ((stcb == NULL) ||
3366 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3367 		/* event not enabled */
3368 		return;
3369 	}
3370 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3371 	if (m_notify == NULL) {
3372 		/* no space left */
3373 		return;
3374 	}
3375 	SCTP_BUF_LEN(m_notify) = 0;
3376 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3377 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3378 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3379 	event->sender_dry_flags = 0;
3380 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3381 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3382 
3383 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3384 	SCTP_BUF_NEXT(m_notify) = NULL;
3385 
3386 	/* append to socket */
3387 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3388 	    0, 0, stcb->asoc.context, 0, 0, 0,
3389 	    m_notify);
3390 	if (control == NULL) {
3391 		/* no memory */
3392 		sctp_m_freem(m_notify);
3393 		return;
3394 	}
3395 	control->length = SCTP_BUF_LEN(m_notify);
3396 	control->spec_flags = M_NOTIFICATION;
3397 	/* not that we need this */
3398 	control->tail_mbuf = m_notify;
3399 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3400 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3401 }
3402 
3403 
3404 void
3405 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3406 {
3407 	struct mbuf *m_notify;
3408 	struct sctp_queued_to_read *control;
3409 	struct sctp_stream_change_event *stradd;
3410 
3411 	if ((stcb == NULL) ||
3412 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3413 		/* event not enabled */
3414 		return;
3415 	}
3416 	if ((stcb->asoc.peer_req_out) && flag) {
3417 		/* Peer made the request, don't tell the local user */
3418 		stcb->asoc.peer_req_out = 0;
3419 		return;
3420 	}
3421 	stcb->asoc.peer_req_out = 0;
3422 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3423 	if (m_notify == NULL)
3424 		/* no space left */
3425 		return;
3426 	SCTP_BUF_LEN(m_notify) = 0;
3427 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3428 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3429 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3430 	stradd->strchange_flags = flag;
3431 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3432 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3433 	stradd->strchange_instrms = numberin;
3434 	stradd->strchange_outstrms = numberout;
3435 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3436 	SCTP_BUF_NEXT(m_notify) = NULL;
3437 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3438 		/* no space */
3439 		sctp_m_freem(m_notify);
3440 		return;
3441 	}
3442 	/* append to socket */
3443 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3444 	    0, 0, stcb->asoc.context, 0, 0, 0,
3445 	    m_notify);
3446 	if (control == NULL) {
3447 		/* no memory */
3448 		sctp_m_freem(m_notify);
3449 		return;
3450 	}
3451 	control->spec_flags = M_NOTIFICATION;
3452 	control->length = SCTP_BUF_LEN(m_notify);
3453 	/* not that we need this */
3454 	control->tail_mbuf = m_notify;
3455 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3456 	    control,
3457 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3458 }
3459 
3460 void
3461 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3462 {
3463 	struct mbuf *m_notify;
3464 	struct sctp_queued_to_read *control;
3465 	struct sctp_assoc_reset_event *strasoc;
3466 
3467 	if ((stcb == NULL) ||
3468 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3469 		/* event not enabled */
3470 		return;
3471 	}
3472 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3473 	if (m_notify == NULL)
3474 		/* no space left */
3475 		return;
3476 	SCTP_BUF_LEN(m_notify) = 0;
3477 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3478 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3479 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3480 	strasoc->assocreset_flags = flag;
3481 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3482 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3483 	strasoc->assocreset_local_tsn = sending_tsn;
3484 	strasoc->assocreset_remote_tsn = recv_tsn;
3485 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3486 	SCTP_BUF_NEXT(m_notify) = NULL;
3487 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3488 		/* no space */
3489 		sctp_m_freem(m_notify);
3490 		return;
3491 	}
3492 	/* append to socket */
3493 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3494 	    0, 0, stcb->asoc.context, 0, 0, 0,
3495 	    m_notify);
3496 	if (control == NULL) {
3497 		/* no memory */
3498 		sctp_m_freem(m_notify);
3499 		return;
3500 	}
3501 	control->spec_flags = M_NOTIFICATION;
3502 	control->length = SCTP_BUF_LEN(m_notify);
3503 	/* not that we need this */
3504 	control->tail_mbuf = m_notify;
3505 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3506 	    control,
3507 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3508 }
3509 
3510 
3511 
3512 static void
3513 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3514     int number_entries, uint16_t * list, int flag)
3515 {
3516 	struct mbuf *m_notify;
3517 	struct sctp_queued_to_read *control;
3518 	struct sctp_stream_reset_event *strreset;
3519 	int len;
3520 
3521 	if ((stcb == NULL) ||
3522 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3523 		/* event not enabled */
3524 		return;
3525 	}
3526 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3527 	if (m_notify == NULL)
3528 		/* no space left */
3529 		return;
3530 	SCTP_BUF_LEN(m_notify) = 0;
3531 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3532 	if (len > M_TRAILINGSPACE(m_notify)) {
3533 		/* never enough room */
3534 		sctp_m_freem(m_notify);
3535 		return;
3536 	}
3537 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3538 	memset(strreset, 0, len);
3539 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3540 	strreset->strreset_flags = flag;
3541 	strreset->strreset_length = len;
3542 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3543 	if (number_entries) {
3544 		int i;
3545 
3546 		for (i = 0; i < number_entries; i++) {
3547 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3548 		}
3549 	}
3550 	SCTP_BUF_LEN(m_notify) = len;
3551 	SCTP_BUF_NEXT(m_notify) = NULL;
3552 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3553 		/* no space */
3554 		sctp_m_freem(m_notify);
3555 		return;
3556 	}
3557 	/* append to socket */
3558 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3559 	    0, 0, stcb->asoc.context, 0, 0, 0,
3560 	    m_notify);
3561 	if (control == NULL) {
3562 		/* no memory */
3563 		sctp_m_freem(m_notify);
3564 		return;
3565 	}
3566 	control->spec_flags = M_NOTIFICATION;
3567 	control->length = SCTP_BUF_LEN(m_notify);
3568 	/* not that we need this */
3569 	control->tail_mbuf = m_notify;
3570 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3571 	    control,
3572 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3573 }
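
/*
 * Illustrative sketch (not part of the original file): an application
 * that has enabled SCTP_STREAM_RESET_EVENT receives the notification
 * queued above as a struct sctp_stream_reset_event followed by the
 * affected stream numbers.  Assuming "buf" points at a complete
 * notification returned by recvmsg() with MSG_NOTIFICATION set, the
 * list could be walked roughly like this:
 *
 *	struct sctp_stream_reset_event *ev;
 *	unsigned int i, n;
 *
 *	ev = (struct sctp_stream_reset_event *)buf;
 *	n = (ev->strreset_length - sizeof(*ev)) / sizeof(uint16_t);
 *	for (i = 0; i < n; i++)
 *		printf("stream %u was reset\n", ev->strreset_stream_list[i]);
 */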
3574 
3575 
3576 static void
3577 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3578 {
3579 	struct mbuf *m_notify;
3580 	struct sctp_remote_error *sre;
3581 	struct sctp_queued_to_read *control;
3582 	unsigned int notif_len;
3583 	uint16_t chunk_len;
3584 
3585 	if ((stcb == NULL) ||
3586 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3587 		return;
3588 	}
3589 	if (chunk != NULL) {
3590 		chunk_len = ntohs(chunk->ch.chunk_length);
3591 	} else {
3592 		chunk_len = 0;
3593 	}
3594 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3595 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3596 	if (m_notify == NULL) {
3597 		/* Retry with smaller value. */
3598 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3599 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3600 		if (m_notify == NULL) {
3601 			return;
3602 		}
3603 	}
3604 	SCTP_BUF_NEXT(m_notify) = NULL;
3605 	sre = mtod(m_notify, struct sctp_remote_error *);
3606 	memset(sre, 0, notif_len);
3607 	sre->sre_type = SCTP_REMOTE_ERROR;
3608 	sre->sre_flags = 0;
3609 	sre->sre_length = sizeof(struct sctp_remote_error);
3610 	sre->sre_error = error;
3611 	sre->sre_assoc_id = sctp_get_associd(stcb);
3612 	if (notif_len > sizeof(struct sctp_remote_error)) {
3613 		memcpy(sre->sre_data, chunk, chunk_len);
3614 		sre->sre_length += chunk_len;
3615 	}
3616 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3617 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3618 	    0, 0, stcb->asoc.context, 0, 0, 0,
3619 	    m_notify);
3620 	if (control != NULL) {
3621 		control->length = SCTP_BUF_LEN(m_notify);
3622 		/* not that we need this */
3623 		control->tail_mbuf = m_notify;
3624 		control->spec_flags = M_NOTIFICATION;
3625 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3626 		    control,
3627 		    &stcb->sctp_socket->so_rcv, 1,
3628 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3629 	} else {
3630 		sctp_m_freem(m_notify);
3631 	}
3632 }
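
/*
 * Note: when the received ERROR chunk fits into the notification, the
 * memcpy above copies the complete chunk, including its chunk header,
 * into sre_data and grows sre_length accordingly, so what the
 * application sees in sre_data starts with the sctp_error_chunk header
 * rather than with the first error cause.
 */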
3633 
3634 
3635 void
3636 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3637     uint32_t error, void *data, int so_locked
3638 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3639     SCTP_UNUSED
3640 #endif
3641 )
3642 {
3643 	if ((stcb == NULL) ||
3644 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3645 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3646 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3647 		/* If the socket is gone we are out of here */
3648 		return;
3649 	}
3650 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3651 		return;
3652 	}
3653 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3654 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3655 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3656 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3657 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3658 			/* Don't report these in front states */
3659 			return;
3660 		}
3661 	}
3662 	switch (notification) {
3663 	case SCTP_NOTIFY_ASSOC_UP:
3664 		if (stcb->asoc.assoc_up_sent == 0) {
3665 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3666 			stcb->asoc.assoc_up_sent = 1;
3667 		}
3668 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3669 			sctp_notify_adaptation_layer(stcb);
3670 		}
3671 		if (stcb->asoc.auth_supported == 0) {
3672 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3673 			    NULL, so_locked);
3674 		}
3675 		break;
3676 	case SCTP_NOTIFY_ASSOC_DOWN:
3677 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3678 		break;
3679 	case SCTP_NOTIFY_INTERFACE_DOWN:
3680 		{
3681 			struct sctp_nets *net;
3682 
3683 			net = (struct sctp_nets *)data;
3684 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3685 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3686 			break;
3687 		}
3688 	case SCTP_NOTIFY_INTERFACE_UP:
3689 		{
3690 			struct sctp_nets *net;
3691 
3692 			net = (struct sctp_nets *)data;
3693 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3694 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3695 			break;
3696 		}
3697 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3698 		{
3699 			struct sctp_nets *net;
3700 
3701 			net = (struct sctp_nets *)data;
3702 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3703 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3704 			break;
3705 		}
3706 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3707 		sctp_notify_send_failed2(stcb, error,
3708 		    (struct sctp_stream_queue_pending *)data, so_locked);
3709 		break;
3710 	case SCTP_NOTIFY_SENT_DG_FAIL:
3711 		sctp_notify_send_failed(stcb, 1, error,
3712 		    (struct sctp_tmit_chunk *)data, so_locked);
3713 		break;
3714 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3715 		sctp_notify_send_failed(stcb, 0, error,
3716 		    (struct sctp_tmit_chunk *)data, so_locked);
3717 		break;
3718 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3719 		{
3720 			uint32_t val;
3721 
3722 			val = *((uint32_t *) data);
3723 
3724 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3725 			break;
3726 		}
3727 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3728 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3729 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3730 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3731 		} else {
3732 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3733 		}
3734 		break;
3735 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3736 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3737 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3738 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3739 		} else {
3740 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3741 		}
3742 		break;
3743 	case SCTP_NOTIFY_ASSOC_RESTART:
3744 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3745 		if (stcb->asoc.auth_supported == 0) {
3746 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3747 			    NULL, so_locked);
3748 		}
3749 		break;
3750 	case SCTP_NOTIFY_STR_RESET_SEND:
3751 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3752 		break;
3753 	case SCTP_NOTIFY_STR_RESET_RECV:
3754 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3755 		break;
3756 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3757 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3758 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3759 		break;
3760 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3761 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3762 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3763 		break;
3764 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3765 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3766 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3767 		break;
3768 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3769 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3770 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3771 		break;
3772 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3773 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3774 		    error, so_locked);
3775 		break;
3776 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3777 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3778 		    error, so_locked);
3779 		break;
3780 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3781 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3782 		    error, so_locked);
3783 		break;
3784 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3785 		sctp_notify_shutdown_event(stcb);
3786 		break;
3787 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3788 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3789 		    (uint16_t) (uintptr_t) data,
3790 		    so_locked);
3791 		break;
3792 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3793 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3794 		    (uint16_t) (uintptr_t) data,
3795 		    so_locked);
3796 		break;
3797 	case SCTP_NOTIFY_NO_PEER_AUTH:
3798 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3799 		    (uint16_t) (uintptr_t) data,
3800 		    so_locked);
3801 		break;
3802 	case SCTP_NOTIFY_SENDER_DRY:
3803 		sctp_notify_sender_dry_event(stcb, so_locked);
3804 		break;
3805 	case SCTP_NOTIFY_REMOTE_ERROR:
3806 		sctp_notify_remote_error(stcb, error, data);
3807 		break;
3808 	default:
3809 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3810 		    __func__, notification, notification);
3811 		break;
3812 	}			/* end switch */
3813 }
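
/*
 * Note: sctp_ulp_notify() is the single dispatch point for ULP
 * notifications.  Each case above just maps an internal event code onto
 * the matching notification builder; the builders themselves drop the
 * event again if the association or socket is going away or if the
 * corresponding feature bit has not been enabled by the application.
 */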
3814 
3815 void
3816 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3817 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3818     SCTP_UNUSED
3819 #endif
3820 )
3821 {
3822 	struct sctp_association *asoc;
3823 	struct sctp_stream_out *outs;
3824 	struct sctp_tmit_chunk *chk, *nchk;
3825 	struct sctp_stream_queue_pending *sp, *nsp;
3826 	int i;
3827 
3828 	if (stcb == NULL) {
3829 		return;
3830 	}
3831 	asoc = &stcb->asoc;
3832 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3833 		/* already being freed */
3834 		return;
3835 	}
3836 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3837 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3838 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3839 		return;
3840 	}
3841 	/* now go through all the gunk, freeing chunks */
3842 	if (holds_lock == 0) {
3843 		SCTP_TCB_SEND_LOCK(stcb);
3844 	}
3845 	/* sent queue SHOULD be empty */
3846 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3847 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3848 		asoc->sent_queue_cnt--;
3849 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3850 			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3851 				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3852 #ifdef INVARIANTS
3853 			} else {
3854 				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3855 #endif
3856 			}
3857 		}
3858 		if (chk->data != NULL) {
3859 			sctp_free_bufspace(stcb, asoc, chk, 1);
3860 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3861 			    error, chk, so_locked);
3862 			if (chk->data) {
3863 				sctp_m_freem(chk->data);
3864 				chk->data = NULL;
3865 			}
3866 		}
3867 		sctp_free_a_chunk(stcb, chk, so_locked);
3868 		/* sa_ignore FREED_MEMORY */
3869 	}
3870 	/* pending send queue SHOULD be empty */
3871 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3872 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3873 		asoc->send_queue_cnt--;
3874 		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3875 			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3876 #ifdef INVARIANTS
3877 		} else {
3878 			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3879 #endif
3880 		}
3881 		if (chk->data != NULL) {
3882 			sctp_free_bufspace(stcb, asoc, chk, 1);
3883 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3884 			    error, chk, so_locked);
3885 			if (chk->data) {
3886 				sctp_m_freem(chk->data);
3887 				chk->data = NULL;
3888 			}
3889 		}
3890 		sctp_free_a_chunk(stcb, chk, so_locked);
3891 		/* sa_ignore FREED_MEMORY */
3892 	}
3893 	for (i = 0; i < asoc->streamoutcnt; i++) {
3894 		/* For each stream */
3895 		outs = &asoc->strmout[i];
3896 		/* clean up any sends there */
3897 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3898 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
3899 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3900 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
3901 			sctp_free_spbufspace(stcb, asoc, sp);
3902 			if (sp->data) {
3903 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3904 				    error, (void *)sp, so_locked);
3905 				if (sp->data) {
3906 					sctp_m_freem(sp->data);
3907 					sp->data = NULL;
3908 					sp->tail_mbuf = NULL;
3909 					sp->length = 0;
3910 				}
3911 			}
3912 			if (sp->net) {
3913 				sctp_free_remote_addr(sp->net);
3914 				sp->net = NULL;
3915 			}
3916 			/* Free the chunk */
3917 			sctp_free_a_strmoq(stcb, sp, so_locked);
3918 			/* sa_ignore FREED_MEMORY */
3919 		}
3920 	}
3921 
3922 	if (holds_lock == 0) {
3923 		SCTP_TCB_SEND_UNLOCK(stcb);
3924 	}
3925 }
3926 
3927 void
3928 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3929     struct sctp_abort_chunk *abort, int so_locked
3930 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3931     SCTP_UNUSED
3932 #endif
3933 )
3934 {
3935 	if (stcb == NULL) {
3936 		return;
3937 	}
3938 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3939 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3940 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3941 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3942 	}
3943 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3944 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3945 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3946 		return;
3947 	}
3948 	/* Tell them we lost the asoc */
3949 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3950 	if (from_peer) {
3951 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3952 	} else {
3953 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3954 	}
3955 }
3956 
3957 void
3958 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3959     struct mbuf *m, int iphlen,
3960     struct sockaddr *src, struct sockaddr *dst,
3961     struct sctphdr *sh, struct mbuf *op_err,
3962     uint8_t mflowtype, uint32_t mflowid,
3963     uint32_t vrf_id, uint16_t port)
3964 {
3965 	uint32_t vtag;
3966 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3967 	struct socket *so;
3968 #endif
3969 
3970 	vtag = 0;
3971 	if (stcb != NULL) {
3972 		vtag = stcb->asoc.peer_vtag;
3973 		vrf_id = stcb->asoc.vrf_id;
3974 	}
3975 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
3976 	    mflowtype, mflowid, inp->fibnum,
3977 	    vrf_id, port);
3978 	if (stcb != NULL) {
3979 		/* We have a TCB to abort, send notification too */
3980 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
3981 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3982 		/* Ok, now lets free it */
3983 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3984 		so = SCTP_INP_SO(inp);
3985 		atomic_add_int(&stcb->asoc.refcnt, 1);
3986 		SCTP_TCB_UNLOCK(stcb);
3987 		SCTP_SOCKET_LOCK(so, 1);
3988 		SCTP_TCB_LOCK(stcb);
3989 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3990 #endif
3991 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3992 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3993 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3994 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3995 		}
3996 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
3997 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
3998 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3999 		SCTP_SOCKET_UNLOCK(so, 1);
4000 #endif
4001 	}
4002 }
4003 #ifdef SCTP_ASOCLOG_OF_TSNS
4004 void
4005 sctp_print_out_track_log(struct sctp_tcb *stcb)
4006 {
4007 #ifdef NOISY_PRINTS
4008 	int i;
4009 
4010 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4011 	SCTP_PRINTF("IN bound TSN log-aaa\n");
4012 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4013 		SCTP_PRINTF("None rcvd\n");
4014 		goto none_in;
4015 	}
4016 	if (stcb->asoc.tsn_in_wrapped) {
4017 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4018 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4019 			    stcb->asoc.in_tsnlog[i].tsn,
4020 			    stcb->asoc.in_tsnlog[i].strm,
4021 			    stcb->asoc.in_tsnlog[i].seq,
4022 			    stcb->asoc.in_tsnlog[i].flgs,
4023 			    stcb->asoc.in_tsnlog[i].sz);
4024 		}
4025 	}
4026 	if (stcb->asoc.tsn_in_at) {
4027 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4028 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4029 			    stcb->asoc.in_tsnlog[i].tsn,
4030 			    stcb->asoc.in_tsnlog[i].strm,
4031 			    stcb->asoc.in_tsnlog[i].seq,
4032 			    stcb->asoc.in_tsnlog[i].flgs,
4033 			    stcb->asoc.in_tsnlog[i].sz);
4034 		}
4035 	}
4036 none_in:
4037 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4038 	if ((stcb->asoc.tsn_out_at == 0) &&
4039 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4040 		SCTP_PRINTF("None sent\n");
4041 	}
4042 	if (stcb->asoc.tsn_out_wrapped) {
4043 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4044 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4045 			    stcb->asoc.out_tsnlog[i].tsn,
4046 			    stcb->asoc.out_tsnlog[i].strm,
4047 			    stcb->asoc.out_tsnlog[i].seq,
4048 			    stcb->asoc.out_tsnlog[i].flgs,
4049 			    stcb->asoc.out_tsnlog[i].sz);
4050 		}
4051 	}
4052 	if (stcb->asoc.tsn_out_at) {
4053 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4054 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4055 			    stcb->asoc.out_tsnlog[i].tsn,
4056 			    stcb->asoc.out_tsnlog[i].strm,
4057 			    stcb->asoc.out_tsnlog[i].seq,
4058 			    stcb->asoc.out_tsnlog[i].flgs,
4059 			    stcb->asoc.out_tsnlog[i].sz);
4060 		}
4061 	}
4062 #endif
4063 }
4064 #endif
4065 
4066 void
4067 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4068     struct mbuf *op_err,
4069     int so_locked
4070 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4071     SCTP_UNUSED
4072 #endif
4073 )
4074 {
4075 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4076 	struct socket *so;
4077 #endif
4078 
4079 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4080 	so = SCTP_INP_SO(inp);
4081 #endif
4082 	if (stcb == NULL) {
4083 		/* Got to have a TCB */
4084 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4085 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4086 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4087 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4088 			}
4089 		}
4090 		return;
4091 	} else {
4092 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4093 	}
4094 	/* notify the peer */
4095 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4096 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4097 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4098 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4099 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4100 	}
4101 	/* notify the ulp */
4102 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4103 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4104 	}
4105 	/* now free the asoc */
4106 #ifdef SCTP_ASOCLOG_OF_TSNS
4107 	sctp_print_out_track_log(stcb);
4108 #endif
4109 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4110 	if (!so_locked) {
4111 		atomic_add_int(&stcb->asoc.refcnt, 1);
4112 		SCTP_TCB_UNLOCK(stcb);
4113 		SCTP_SOCKET_LOCK(so, 1);
4114 		SCTP_TCB_LOCK(stcb);
4115 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4116 	}
4117 #endif
4118 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4119 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4120 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4121 	if (!so_locked) {
4122 		SCTP_SOCKET_UNLOCK(so, 1);
4123 	}
4124 #endif
4125 }
4126 
4127 void
4128 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4129     struct sockaddr *src, struct sockaddr *dst,
4130     struct sctphdr *sh, struct sctp_inpcb *inp,
4131     struct mbuf *cause,
4132     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4133     uint32_t vrf_id, uint16_t port)
4134 {
4135 	struct sctp_chunkhdr *ch, chunk_buf;
4136 	unsigned int chk_length;
4137 	int contains_init_chunk;
4138 
4139 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4140 	/* Generate a TO address for future reference */
4141 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4142 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4143 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4144 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4145 		}
4146 	}
4147 	contains_init_chunk = 0;
4148 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4149 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4150 	while (ch != NULL) {
4151 		chk_length = ntohs(ch->chunk_length);
4152 		if (chk_length < sizeof(*ch)) {
4153 			/* break to abort land */
4154 			break;
4155 		}
4156 		switch (ch->chunk_type) {
4157 		case SCTP_INIT:
4158 			contains_init_chunk = 1;
4159 			break;
4160 		case SCTP_PACKET_DROPPED:
4161 			/* we don't respond to pkt-dropped */
4162 			return;
4163 		case SCTP_ABORT_ASSOCIATION:
4164 			/* we don't respond with an ABORT to an ABORT */
4165 			return;
4166 		case SCTP_SHUTDOWN_COMPLETE:
4167 			/*
4168 			 * we ignore it since we are not waiting for it and
4169 			 * the peer is gone
4170 			 */
4171 			return;
4172 		case SCTP_SHUTDOWN_ACK:
4173 			sctp_send_shutdown_complete2(src, dst, sh,
4174 			    mflowtype, mflowid, fibnum,
4175 			    vrf_id, port);
4176 			return;
4177 		default:
4178 			break;
4179 		}
4180 		offset += SCTP_SIZE32(chk_length);
4181 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4182 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4183 	}
4184 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4185 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4186 	    (contains_init_chunk == 0))) {
4187 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4188 		    mflowtype, mflowid, fibnum,
4189 		    vrf_id, port);
4190 	}
4191 }
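
/*
 * Note on the final check above: the sctp_blackhole sysctl
 * (net.inet.sctp.blackhole) controls how out-of-the-blue packets are
 * answered.  A value of 0 always sends an ABORT, a value of 1
 * suppresses the ABORT only when the packet contained an INIT chunk,
 * and any other value (typically 2) never sends an ABORT, which is
 * exactly what the compound condition encodes.
 */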
4192 
4193 /*
4194  * check the inbound datagram to make sure there is not an abort inside it;
4195  * if there is, return 1, else return 0.
4196  */
4197 int
4198 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4199 {
4200 	struct sctp_chunkhdr *ch;
4201 	struct sctp_init_chunk *init_chk, chunk_buf;
4202 	int offset;
4203 	unsigned int chk_length;
4204 
4205 	offset = iphlen + sizeof(struct sctphdr);
4206 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4207 	    (uint8_t *) & chunk_buf);
4208 	while (ch != NULL) {
4209 		chk_length = ntohs(ch->chunk_length);
4210 		if (chk_length < sizeof(*ch)) {
4211 			/* packet is probably corrupt */
4212 			break;
4213 		}
4214 		/* we seem to be ok, is it an abort? */
4215 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4216 			/* yep, tell them */
4217 			return (1);
4218 		}
4219 		if (ch->chunk_type == SCTP_INITIATION) {
4220 			/* need to update the Vtag */
4221 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4222 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4223 			if (init_chk != NULL) {
4224 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4225 			}
4226 		}
4227 		/* Nope, move to the next chunk */
4228 		offset += SCTP_SIZE32(chk_length);
4229 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4230 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4231 	}
4232 	return (0);
4233 }
4234 
4235 /*
4236  * currently (2/02), ifa_addr embeds scope_ids and doesn't have sin6_scope_id
4237  * set (i.e. it's 0), so create this function to compare link-local scopes
4238  */
4239 #ifdef INET6
4240 uint32_t
4241 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4242 {
4243 	struct sockaddr_in6 a, b;
4244 
4245 	/* save copies */
4246 	a = *addr1;
4247 	b = *addr2;
4248 
4249 	if (a.sin6_scope_id == 0)
4250 		if (sa6_recoverscope(&a)) {
4251 			/* can't get scope, so can't match */
4252 			return (0);
4253 		}
4254 	if (b.sin6_scope_id == 0)
4255 		if (sa6_recoverscope(&b)) {
4256 			/* can't get scope, so can't match */
4257 			return (0);
4258 		}
4259 	if (a.sin6_scope_id != b.sin6_scope_id)
4260 		return (0);
4261 
4262 	return (1);
4263 }
4264 
4265 /*
4266  * returns a sockaddr_in6 with embedded scope recovered and removed
4267  */
4268 struct sockaddr_in6 *
4269 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4270 {
4271 	/* check and strip embedded scope junk */
4272 	if (addr->sin6_family == AF_INET6) {
4273 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4274 			if (addr->sin6_scope_id == 0) {
4275 				*store = *addr;
4276 				if (!sa6_recoverscope(store)) {
4277 					/* use the recovered scope */
4278 					addr = store;
4279 				}
4280 			} else {
4281 				/* else, return the original "to" addr */
4282 				in6_clearscope(&addr->sin6_addr);
4283 			}
4284 		}
4285 	}
4286 	return (addr);
4287 }
4288 #endif
4289 
4290 /*
4291  * are the two addresses the same?  currently a "scopeless" check.  Returns 1
4292  * if same, 0 if not.
4293  */
4294 int
4295 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4296 {
4297 
4298 	/* must be valid */
4299 	if (sa1 == NULL || sa2 == NULL)
4300 		return (0);
4301 
4302 	/* must be the same family */
4303 	if (sa1->sa_family != sa2->sa_family)
4304 		return (0);
4305 
4306 	switch (sa1->sa_family) {
4307 #ifdef INET6
4308 	case AF_INET6:
4309 		{
4310 			/* IPv6 addresses */
4311 			struct sockaddr_in6 *sin6_1, *sin6_2;
4312 
4313 			sin6_1 = (struct sockaddr_in6 *)sa1;
4314 			sin6_2 = (struct sockaddr_in6 *)sa2;
4315 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4316 			    sin6_2));
4317 		}
4318 #endif
4319 #ifdef INET
4320 	case AF_INET:
4321 		{
4322 			/* IPv4 addresses */
4323 			struct sockaddr_in *sin_1, *sin_2;
4324 
4325 			sin_1 = (struct sockaddr_in *)sa1;
4326 			sin_2 = (struct sockaddr_in *)sa2;
4327 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4328 		}
4329 #endif
4330 	default:
4331 		/* we don't do these... */
4332 		return (0);
4333 	}
4334 }
4335 
4336 void
4337 sctp_print_address(struct sockaddr *sa)
4338 {
4339 #ifdef INET6
4340 	char ip6buf[INET6_ADDRSTRLEN];
4341 #endif
4342 
4343 	switch (sa->sa_family) {
4344 #ifdef INET6
4345 	case AF_INET6:
4346 		{
4347 			struct sockaddr_in6 *sin6;
4348 
4349 			sin6 = (struct sockaddr_in6 *)sa;
4350 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4351 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4352 			    ntohs(sin6->sin6_port),
4353 			    sin6->sin6_scope_id);
4354 			break;
4355 		}
4356 #endif
4357 #ifdef INET
4358 	case AF_INET:
4359 		{
4360 			struct sockaddr_in *sin;
4361 			unsigned char *p;
4362 
4363 			sin = (struct sockaddr_in *)sa;
4364 			p = (unsigned char *)&sin->sin_addr;
4365 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4366 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4367 			break;
4368 		}
4369 #endif
4370 	default:
4371 		SCTP_PRINTF("?\n");
4372 		break;
4373 	}
4374 }
4375 
4376 void
4377 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4378     struct sctp_inpcb *new_inp,
4379     struct sctp_tcb *stcb,
4380     int waitflags)
4381 {
4382 	/*
4383 	 * go through our old INP and pull off any control structures that
4384 	 * belong to stcb and move them to the new inp.
4385 	 */
4386 	struct socket *old_so, *new_so;
4387 	struct sctp_queued_to_read *control, *nctl;
4388 	struct sctp_readhead tmp_queue;
4389 	struct mbuf *m;
4390 	int error = 0;
4391 
4392 	old_so = old_inp->sctp_socket;
4393 	new_so = new_inp->sctp_socket;
4394 	TAILQ_INIT(&tmp_queue);
4395 	error = sblock(&old_so->so_rcv, waitflags);
4396 	if (error) {
4397 		/*
4398 		 * Gak, can't get sblock, we have a problem. Data will be
4399 		 * left stranded, and we don't dare look at it since the
4400 		 * other thread may be reading something. Oh well, it's a
4401 		 * screwed up app that does a peeloff OR an accept while
4402 		 * reading from the main socket... actually it's only the
4403 		 * peeloff() case, since I think read will fail on a
4404 		 * listening socket.
4405 		 */
4406 		return;
4407 	}
4408 	/* lock the socket buffers */
4409 	SCTP_INP_READ_LOCK(old_inp);
4410 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4411 		/* Pull off all for our target stcb */
4412 		if (control->stcb == stcb) {
4413 			/* remove it, we want it */
4414 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4415 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4416 			m = control->data;
4417 			while (m) {
4418 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4419 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4420 				}
4421 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4422 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4423 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4424 				}
4425 				m = SCTP_BUF_NEXT(m);
4426 			}
4427 		}
4428 	}
4429 	SCTP_INP_READ_UNLOCK(old_inp);
4430 	/* Remove the sb-lock on the old socket */
4431 
4432 	sbunlock(&old_so->so_rcv);
4433 	/* Now we move them over to the new socket buffer */
4434 	SCTP_INP_READ_LOCK(new_inp);
4435 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4436 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4437 		m = control->data;
4438 		while (m) {
4439 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4440 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4441 			}
4442 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4443 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4444 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4445 			}
4446 			m = SCTP_BUF_NEXT(m);
4447 		}
4448 	}
4449 	SCTP_INP_READ_UNLOCK(new_inp);
4450 }
4451 
4452 void
4453 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4454     struct sctp_tcb *stcb,
4455     int so_locked
4456 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4457     SCTP_UNUSED
4458 #endif
4459 )
4460 {
4461 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4462 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4463 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4464 		} else {
4465 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4466 			struct socket *so;
4467 
4468 			so = SCTP_INP_SO(inp);
4469 			if (!so_locked) {
4470 				if (stcb) {
4471 					atomic_add_int(&stcb->asoc.refcnt, 1);
4472 					SCTP_TCB_UNLOCK(stcb);
4473 				}
4474 				SCTP_SOCKET_LOCK(so, 1);
4475 				if (stcb) {
4476 					SCTP_TCB_LOCK(stcb);
4477 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4478 				}
4479 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4480 					SCTP_SOCKET_UNLOCK(so, 1);
4481 					return;
4482 				}
4483 			}
4484 #endif
4485 			sctp_sorwakeup(inp, inp->sctp_socket);
4486 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4487 			if (!so_locked) {
4488 				SCTP_SOCKET_UNLOCK(so, 1);
4489 			}
4490 #endif
4491 		}
4492 	}
4493 }
4494 
4495 void
4496 sctp_add_to_readq(struct sctp_inpcb *inp,
4497     struct sctp_tcb *stcb,
4498     struct sctp_queued_to_read *control,
4499     struct sockbuf *sb,
4500     int end,
4501     int inp_read_lock_held,
4502     int so_locked
4503 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4504     SCTP_UNUSED
4505 #endif
4506 )
4507 {
4508 	/*
4509 	 * Here we must place the control on the end of the socket read
4510 	 * queue AND increment sb_cc so that select will work properly on
4511 	 * read.
4512 	 */
4513 	struct mbuf *m, *prev = NULL;
4514 
4515 	if (inp == NULL) {
4516 		/* Gak, TSNH!! */
4517 #ifdef INVARIANTS
4518 		panic("Gak, inp NULL on add_to_readq");
4519 #endif
4520 		return;
4521 	}
4522 	if (inp_read_lock_held == 0)
4523 		SCTP_INP_READ_LOCK(inp);
4524 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4525 		sctp_free_remote_addr(control->whoFrom);
4526 		if (control->data) {
4527 			sctp_m_freem(control->data);
4528 			control->data = NULL;
4529 		}
4530 		sctp_free_a_readq(stcb, control);
4531 		if (inp_read_lock_held == 0)
4532 			SCTP_INP_READ_UNLOCK(inp);
4533 		return;
4534 	}
4535 	if (!(control->spec_flags & M_NOTIFICATION)) {
4536 		atomic_add_int(&inp->total_recvs, 1);
4537 		if (!control->do_not_ref_stcb) {
4538 			atomic_add_int(&stcb->total_recvs, 1);
4539 		}
4540 	}
4541 	m = control->data;
4542 	control->held_length = 0;
4543 	control->length = 0;
4544 	while (m) {
4545 		if (SCTP_BUF_LEN(m) == 0) {
4546 			/* Skip mbufs with NO length */
4547 			if (prev == NULL) {
4548 				/* First one */
4549 				control->data = sctp_m_free(m);
4550 				m = control->data;
4551 			} else {
4552 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4553 				m = SCTP_BUF_NEXT(prev);
4554 			}
4555 			if (m == NULL) {
4556 				control->tail_mbuf = prev;
4557 			}
4558 			continue;
4559 		}
4560 		prev = m;
4561 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4562 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4563 		}
4564 		sctp_sballoc(stcb, sb, m);
4565 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4566 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4567 		}
4568 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4569 		m = SCTP_BUF_NEXT(m);
4570 	}
4571 	if (prev != NULL) {
4572 		control->tail_mbuf = prev;
4573 	} else {
4574 		/* Everything got collapsed out?? */
4575 		sctp_free_remote_addr(control->whoFrom);
4576 		sctp_free_a_readq(stcb, control);
4577 		if (inp_read_lock_held == 0)
4578 			SCTP_INP_READ_UNLOCK(inp);
4579 		return;
4580 	}
4581 	if (end) {
4582 		control->end_added = 1;
4583 	}
4584 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4585 	control->on_read_q = 1;
4586 	if (inp_read_lock_held == 0)
4587 		SCTP_INP_READ_UNLOCK(inp);
4588 	if (inp && inp->sctp_socket) {
4589 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
4590 	}
4591 }
4592 
4593 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4594  *************ALTERNATE ROUTING CODE
4595  */
4596 
4597 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4598  *************ALTERNATE ROUTING CODE
4599  */
4600 
4601 struct mbuf *
4602 sctp_generate_cause(uint16_t code, char *info)
4603 {
4604 	struct mbuf *m;
4605 	struct sctp_gen_error_cause *cause;
4606 	size_t info_len;
4607 	uint16_t len;
4608 
4609 	if ((code == 0) || (info == NULL)) {
4610 		return (NULL);
4611 	}
4612 	info_len = strlen(info);
4613 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4614 		return (NULL);
4615 	}
4616 	len = (uint16_t) (sizeof(struct sctp_paramhdr) + info_len);
4617 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4618 	if (m != NULL) {
4619 		SCTP_BUF_LEN(m) = len;
4620 		cause = mtod(m, struct sctp_gen_error_cause *);
4621 		cause->code = htons(code);
4622 		cause->length = htons(len);
4623 		memcpy(cause->info, info, info_len);
4624 	}
4625 	return (m);
4626 }
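
/*
 * Illustrative sketch (not part of the original file): callers normally
 * use sctp_generate_cause() to build the operational error mbuf that is
 * handed to the ABORT helpers above, along the lines of:
 *
 *	struct mbuf *op_err;
 *
 *	op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION,
 *	    "example reason");
 *	sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
 *
 * The helper returns NULL for a zero code, a NULL info string, or an
 * oversized string, so callers must tolerate op_err == NULL (the ABORT
 * is then sent without an error cause).
 */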
4627 
4628 struct mbuf *
4629 sctp_generate_no_user_data_cause(uint32_t tsn)
4630 {
4631 	struct mbuf *m;
4632 	struct sctp_error_no_user_data *no_user_data_cause;
4633 	uint16_t len;
4634 
4635 	len = (uint16_t) sizeof(struct sctp_error_no_user_data);
4636 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4637 	if (m != NULL) {
4638 		SCTP_BUF_LEN(m) = len;
4639 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4640 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4641 		no_user_data_cause->cause.length = htons(len);
4642 		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4643 	}
4644 	return (m);
4645 }
4646 
4647 #ifdef SCTP_MBCNT_LOGGING
4648 void
4649 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4650     struct sctp_tmit_chunk *tp1, int chk_cnt)
4651 {
4652 	if (tp1->data == NULL) {
4653 		return;
4654 	}
4655 	asoc->chunks_on_out_queue -= chk_cnt;
4656 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4657 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4658 		    asoc->total_output_queue_size,
4659 		    tp1->book_size,
4660 		    0,
4661 		    tp1->mbcnt);
4662 	}
4663 	if (asoc->total_output_queue_size >= tp1->book_size) {
4664 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4665 	} else {
4666 		asoc->total_output_queue_size = 0;
4667 	}
4668 
4669 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4670 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4671 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4672 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4673 		} else {
4674 			stcb->sctp_socket->so_snd.sb_cc = 0;
4675 
4676 		}
4677 	}
4678 }
4679 
4680 #endif
4681 
4682 int
4683 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4684     uint8_t sent, int so_locked
4685 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4686     SCTP_UNUSED
4687 #endif
4688 )
4689 {
4690 	struct sctp_stream_out *strq;
4691 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4692 	struct sctp_stream_queue_pending *sp;
4693 	uint32_t mid;
4694 	uint16_t sid;
4695 	uint8_t foundeom = 0;
4696 	int ret_sz = 0;
4697 	int notdone;
4698 	int do_wakeup_routine = 0;
4699 
4700 	sid = tp1->rec.data.sid;
4701 	mid = tp1->rec.data.mid;
4702 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4703 		stcb->asoc.abandoned_sent[0]++;
4704 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4705 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
4706 #if defined(SCTP_DETAILED_STR_STATS)
4707 		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4708 #endif
4709 	} else {
4710 		stcb->asoc.abandoned_unsent[0]++;
4711 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4712 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4713 #if defined(SCTP_DETAILED_STR_STATS)
4714 		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4715 #endif
4716 	}
4717 	do {
4718 		ret_sz += tp1->book_size;
4719 		if (tp1->data != NULL) {
4720 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4721 				sctp_flight_size_decrease(tp1);
4722 				sctp_total_flight_decrease(stcb, tp1);
4723 			}
4724 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4725 			stcb->asoc.peers_rwnd += tp1->send_size;
4726 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4727 			if (sent) {
4728 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4729 			} else {
4730 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4731 			}
4732 			if (tp1->data) {
4733 				sctp_m_freem(tp1->data);
4734 				tp1->data = NULL;
4735 			}
4736 			do_wakeup_routine = 1;
4737 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4738 				stcb->asoc.sent_queue_cnt_removeable--;
4739 			}
4740 		}
4741 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4742 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4743 		    SCTP_DATA_NOT_FRAG) {
4744 			/* not fragmented, we are done */
4745 			notdone = 0;
4746 			foundeom = 1;
4747 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4748 			/* end of frag, we are done */
4749 			notdone = 0;
4750 			foundeom = 1;
4751 		} else {
4752 			/*
4753 			 * It's a begin or middle piece, we must mark all of
4754 			 * it
4755 			 */
4756 			notdone = 1;
4757 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4758 		}
4759 	} while (tp1 && notdone);
4760 	if (foundeom == 0) {
4761 		/*
4762 		 * The multi-part message was scattered across the send and
4763 		 * sent queue.
4764 		 */
4765 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4766 			if ((tp1->rec.data.sid != sid) ||
4767 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4768 				break;
4769 			}
4770 			/*
4771 			 * Save it in chk in case we have some on the stream
4772 			 * out queue. If so, and we have an un-transmitted
4773 			 * one, we don't have to fudge the TSN.
4774 			 */
4775 			chk = tp1;
4776 			ret_sz += tp1->book_size;
4777 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4778 			if (sent) {
4779 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4780 			} else {
4781 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4782 			}
4783 			if (tp1->data) {
4784 				sctp_m_freem(tp1->data);
4785 				tp1->data = NULL;
4786 			}
4787 			/* No flight involved here, book the size to 0 */
4788 			tp1->book_size = 0;
4789 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4790 				foundeom = 1;
4791 			}
4792 			do_wakeup_routine = 1;
4793 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4794 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4795 			/* on to the sent queue so we can wait for it to be
4796 			 * passed by. */
4797 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4798 			    sctp_next);
4799 			stcb->asoc.send_queue_cnt--;
4800 			stcb->asoc.sent_queue_cnt++;
4801 		}
4802 	}
4803 	if (foundeom == 0) {
4804 		/*
4805 		 * Still no eom found. That means there is stuff left on the
4806 		 * stream out queue.. yuck.
4807 		 */
4808 		SCTP_TCB_SEND_LOCK(stcb);
4809 		strq = &stcb->asoc.strmout[sid];
4810 		sp = TAILQ_FIRST(&strq->outqueue);
4811 		if (sp != NULL) {
4812 			sp->discard_rest = 1;
4813 			/*
4814 			 * We may need to put a chunk on the queue that
4815 			 * holds the TSN that would have been sent with the
4816 			 * LAST bit.
4817 			 */
4818 			if (chk == NULL) {
4819 				/* Yep, we have to */
4820 				sctp_alloc_a_chunk(stcb, chk);
4821 				if (chk == NULL) {
4822 					/*
4823 					 * we are hosed. All we can do is
4824 					 * nothing.. which will cause an
4825 					 * abort if the peer is paying
4826 					 * attention.
4827 					 */
4828 					goto oh_well;
4829 				}
4830 				memset(chk, 0, sizeof(*chk));
4831 				chk->rec.data.rcv_flags = 0;
4832 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4833 				chk->asoc = &stcb->asoc;
4834 				if (stcb->asoc.idata_supported == 0) {
4835 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4836 						chk->rec.data.mid = 0;
4837 					} else {
4838 						chk->rec.data.mid = strq->next_mid_ordered;
4839 					}
4840 				} else {
4841 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4842 						chk->rec.data.mid = strq->next_mid_unordered;
4843 					} else {
4844 						chk->rec.data.mid = strq->next_mid_ordered;
4845 					}
4846 				}
4847 				chk->rec.data.sid = sp->sid;
4848 				chk->rec.data.ppid = sp->ppid;
4849 				chk->rec.data.context = sp->context;
4850 				chk->flags = sp->act_flags;
4851 				chk->whoTo = NULL;
4852 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4853 				strq->chunks_on_queues++;
4854 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4855 				stcb->asoc.sent_queue_cnt++;
4856 				stcb->asoc.pr_sctp_cnt++;
4857 			}
4858 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4859 			if (sp->sinfo_flags & SCTP_UNORDERED) {
4860 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4861 			}
4862 			if (stcb->asoc.idata_supported == 0) {
4863 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4864 					strq->next_mid_ordered++;
4865 				}
4866 			} else {
4867 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4868 					strq->next_mid_unordered++;
4869 				} else {
4870 					strq->next_mid_ordered++;
4871 				}
4872 			}
4873 	oh_well:
4874 			if (sp->data) {
4875 				/*
4876 				 * Pull any data to free up the SB and allow
4877 				 * the sender to "add more" while we throw
4878 				 * it away :-)
4879 				 */
4880 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4881 				ret_sz += sp->length;
4882 				do_wakeup_routine = 1;
4883 				sp->some_taken = 1;
4884 				sctp_m_freem(sp->data);
4885 				sp->data = NULL;
4886 				sp->tail_mbuf = NULL;
4887 				sp->length = 0;
4888 			}
4889 		}
4890 		SCTP_TCB_SEND_UNLOCK(stcb);
4891 	}
4892 	if (do_wakeup_routine) {
4893 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4894 		struct socket *so;
4895 
4896 		so = SCTP_INP_SO(stcb->sctp_ep);
4897 		if (!so_locked) {
4898 			atomic_add_int(&stcb->asoc.refcnt, 1);
4899 			SCTP_TCB_UNLOCK(stcb);
4900 			SCTP_SOCKET_LOCK(so, 1);
4901 			SCTP_TCB_LOCK(stcb);
4902 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4903 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4904 				/* assoc was freed while we were unlocked */
4905 				SCTP_SOCKET_UNLOCK(so, 1);
4906 				return (ret_sz);
4907 			}
4908 		}
4909 #endif
4910 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4911 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4912 		if (!so_locked) {
4913 			SCTP_SOCKET_UNLOCK(so, 1);
4914 		}
4915 #endif
4916 	}
4917 	return (ret_sz);
4918 }
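
/*
 * Note: sctp_release_pr_sctp_chunk() abandons a (possibly fragmented)
 * PR-SCTP message.  Every fragment found on the sent and send queues is
 * marked SCTP_FORWARD_TSN_SKIP and its data freed; if the tail of the
 * message is still sitting on the stream out queue, a placeholder chunk
 * carrying the LAST_FRAG flag is manufactured so that a FORWARD-TSN can
 * cover the whole message.  The return value is the number of
 * bookkeeping bytes released from the send buffer.
 */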
4919 
4920 /*
4921  * checks to see if the given address, sa, is one that is currently known by
4922  * the kernel.  Note: it can't distinguish the same address on multiple
4923  * interfaces and doesn't handle multiple addresses with different zone/scope
4924  * ids.  Note: ifa_ifwithaddr() compares the entire sockaddr struct.
4925  */
4926 struct sctp_ifa *
4927 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4928     int holds_lock)
4929 {
4930 	struct sctp_laddr *laddr;
4931 
4932 	if (holds_lock == 0) {
4933 		SCTP_INP_RLOCK(inp);
4934 	}
4935 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4936 		if (laddr->ifa == NULL)
4937 			continue;
4938 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4939 			continue;
4940 #ifdef INET
4941 		if (addr->sa_family == AF_INET) {
4942 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4943 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4944 				/* found him. */
4945 				if (holds_lock == 0) {
4946 					SCTP_INP_RUNLOCK(inp);
4947 				}
4948 				return (laddr->ifa);
4949 				break;
4950 			}
4951 		}
4952 #endif
4953 #ifdef INET6
4954 		if (addr->sa_family == AF_INET6) {
4955 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4956 			    &laddr->ifa->address.sin6)) {
4957 				/* found him. */
4958 				if (holds_lock == 0) {
4959 					SCTP_INP_RUNLOCK(inp);
4960 				}
4961 				return (laddr->ifa);
4962 				break;
4963 			}
4964 		}
4965 #endif
4966 	}
4967 	if (holds_lock == 0) {
4968 		SCTP_INP_RUNLOCK(inp);
4969 	}
4970 	return (NULL);
4971 }
4972 
4973 uint32_t
4974 sctp_get_ifa_hash_val(struct sockaddr *addr)
4975 {
4976 	switch (addr->sa_family) {
4977 #ifdef INET
4978 	case AF_INET:
4979 		{
4980 			struct sockaddr_in *sin;
4981 
4982 			sin = (struct sockaddr_in *)addr;
4983 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4984 		}
4985 #endif
4986 #ifdef INET6
4987 	case AF_INET6:
4988 		{
4989 			struct sockaddr_in6 *sin6;
4990 			uint32_t hash_of_addr;
4991 
4992 			sin6 = (struct sockaddr_in6 *)addr;
4993 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4994 			    sin6->sin6_addr.s6_addr32[1] +
4995 			    sin6->sin6_addr.s6_addr32[2] +
4996 			    sin6->sin6_addr.s6_addr32[3]);
4997 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4998 			return (hash_of_addr);
4999 		}
5000 #endif
5001 	default:
5002 		break;
5003 	}
5004 	return (0);
5005 }
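
/*
 * Note: the value computed above is only used to pick a bucket in the
 * per-VRF address hash (see sctp_find_ifa_by_addr() below).  It folds
 * the address words together and XORs in the upper 16 bits so that
 * addresses differing only in their high halves still spread across
 * buckets.
 */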
5006 
5007 struct sctp_ifa *
5008 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5009 {
5010 	struct sctp_ifa *sctp_ifap;
5011 	struct sctp_vrf *vrf;
5012 	struct sctp_ifalist *hash_head;
5013 	uint32_t hash_of_addr;
5014 
5015 	if (holds_lock == 0)
5016 		SCTP_IPI_ADDR_RLOCK();
5017 
5018 	vrf = sctp_find_vrf(vrf_id);
5019 	if (vrf == NULL) {
5020 		if (holds_lock == 0)
5021 			SCTP_IPI_ADDR_RUNLOCK();
5022 		return (NULL);
5023 	}
5024 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5025 
5026 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5027 	if (hash_head == NULL) {
5028 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5029 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5030 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5031 		sctp_print_address(addr);
5032 		SCTP_PRINTF("No such bucket for address\n");
5033 		if (holds_lock == 0)
5034 			SCTP_IPI_ADDR_RUNLOCK();
5035 
5036 		return (NULL);
5037 	}
5038 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5039 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5040 			continue;
5041 #ifdef INET
5042 		if (addr->sa_family == AF_INET) {
5043 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5044 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5045 				/* found him. */
5046 				if (holds_lock == 0)
5047 					SCTP_IPI_ADDR_RUNLOCK();
5048 				return (sctp_ifap);
5049 				break;
5050 			}
5051 		}
5052 #endif
5053 #ifdef INET6
5054 		if (addr->sa_family == AF_INET6) {
5055 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5056 			    &sctp_ifap->address.sin6)) {
5057 				/* found him. */
5058 				if (holds_lock == 0)
5059 					SCTP_IPI_ADDR_RUNLOCK();
5060 				return (sctp_ifap);
5061 				break;
5062 			}
5063 		}
5064 #endif
5065 	}
5066 	if (holds_lock == 0)
5067 		SCTP_IPI_ADDR_RUNLOCK();
5068 	return (NULL);
5069 }
5070 
5071 static void
5072 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
5073     uint32_t rwnd_req)
5074 {
5075 	/* User pulled some data, do we need a rwnd update? */
5076 	int r_unlocked = 0;
5077 	uint32_t dif, rwnd;
5078 	struct socket *so = NULL;
5079 
5080 	if (stcb == NULL)
5081 		return;
5082 
5083 	atomic_add_int(&stcb->asoc.refcnt, 1);
5084 
5085 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5086 	    SCTP_STATE_SHUTDOWN_RECEIVED |
5087 	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5088 		/* Pre-check: if we are freeing, no update */
5089 		goto no_lock;
5090 	}
5091 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5092 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5093 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5094 		goto out;
5095 	}
5096 	so = stcb->sctp_socket;
5097 	if (so == NULL) {
5098 		goto out;
5099 	}
5100 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5101 	/* Have you freed enough to look? */
5102 	*freed_so_far = 0;
5103 	/* Yep, it's worth a look and the lock overhead */
5104 
5105 	/* Figure out what the rwnd would be */
5106 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5107 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5108 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5109 	} else {
5110 		dif = 0;
5111 	}
5112 	if (dif >= rwnd_req) {
5113 		if (hold_rlock) {
5114 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5115 			r_unlocked = 1;
5116 		}
5117 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5118 			/*
5119 			 * One last check before we possibly allow the guy
5120 			 * in. There is a race where the guy has not yet
5121 			 * reached the gate; in that case we bail out.
5122 			 */
5123 			goto out;
5124 		}
5125 		SCTP_TCB_LOCK(stcb);
5126 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5127 			/* No reports here */
5128 			SCTP_TCB_UNLOCK(stcb);
5129 			goto out;
5130 		}
5131 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5132 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5133 
5134 		sctp_chunk_output(stcb->sctp_ep, stcb,
5135 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5136 		/* make sure no timer is running */
5137 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5138 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5139 		SCTP_TCB_UNLOCK(stcb);
5140 	} else {
5141 		/* Update how much we have pending */
5142 		stcb->freed_by_sorcv_sincelast = dif;
5143 	}
5144 out:
5145 	if (so && r_unlocked && hold_rlock) {
5146 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5147 	}
5148 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5149 no_lock:
5150 	atomic_add_int(&stcb->asoc.refcnt, -1);
5151 	return;
5152 }
5153 
5154 int
5155 sctp_sorecvmsg(struct socket *so,
5156     struct uio *uio,
5157     struct mbuf **mp,
5158     struct sockaddr *from,
5159     int fromlen,
5160     int *msg_flags,
5161     struct sctp_sndrcvinfo *sinfo,
5162     int filling_sinfo)
5163 {
5164 	/*
5165 	 * MSG flags we will look at: MSG_DONTWAIT - non-blocking IO;
5166 	 * MSG_PEEK - look, don't touch (only valid without an mbuf copy,
5167 	 * i.e. mp == NULL, so uio is the copy method to userland);
5168 	 * MSG_WAITALL - ??  On the way out we may send out any
5169 	 * combination of: MSG_NOTIFICATION and MSG_EOR.
5170 	 *
5171 	 */
5172 	struct sctp_inpcb *inp = NULL;
5173 	int my_len = 0;
5174 	int cp_len = 0, error = 0;
5175 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5176 	struct mbuf *m = NULL;
5177 	struct sctp_tcb *stcb = NULL;
5178 	int wakeup_read_socket = 0;
5179 	int freecnt_applied = 0;
5180 	int out_flags = 0, in_flags = 0;
5181 	int block_allowed = 1;
5182 	uint32_t freed_so_far = 0;
5183 	uint32_t copied_so_far = 0;
5184 	int in_eeor_mode = 0;
5185 	int no_rcv_needed = 0;
5186 	uint32_t rwnd_req = 0;
5187 	int hold_sblock = 0;
5188 	int hold_rlock = 0;
5189 	ssize_t slen = 0;
5190 	uint32_t held_length = 0;
5191 	int sockbuf_lock = 0;
5192 
5193 	if (uio == NULL) {
5194 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5195 		return (EINVAL);
5196 	}
5197 	if (msg_flags) {
5198 		in_flags = *msg_flags;
5199 		if (in_flags & MSG_PEEK)
5200 			SCTP_STAT_INCR(sctps_read_peeks);
5201 	} else {
5202 		in_flags = 0;
5203 	}
5204 	slen = uio->uio_resid;
5205 
5206 	/* Pull in and set up our int flags */
5207 	if (in_flags & MSG_OOB) {
5208 		/* Out-of-band data is NOT supported */
5209 		return (EOPNOTSUPP);
5210 	}
5211 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5212 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5213 		return (EINVAL);
5214 	}
5215 	if ((in_flags & (MSG_DONTWAIT
5216 	    | MSG_NBIO
5217 	    )) ||
5218 	    SCTP_SO_IS_NBIO(so)) {
5219 		block_allowed = 0;
5220 	}
5221 	/* setup the endpoint */
5222 	inp = (struct sctp_inpcb *)so->so_pcb;
5223 	if (inp == NULL) {
5224 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5225 		return (EFAULT);
5226 	}
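	/*
	 * rwnd_req is the threshold of freed data at which we consider
	 * sending a window update: a fraction of the receive buffer limit,
	 * but never less than SCTP_MIN_RWND.
	 */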
5227 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5228 	/* Must be at least an MTU's worth */
5229 	if (rwnd_req < SCTP_MIN_RWND)
5230 		rwnd_req = SCTP_MIN_RWND;
5231 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5232 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5233 		sctp_misc_ints(SCTP_SORECV_ENTER,
5234 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t) uio->uio_resid);
5235 	}
5236 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5237 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5238 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t) uio->uio_resid);
5239 	}
5240 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5241 	if (error) {
5242 		goto release_unlocked;
5243 	}
5244 	sockbuf_lock = 1;
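	/*
	 * From here on we hold the sockbuf sleep lock, so other receivers
	 * on this socket are held off until we sbunlock() on the way out.
	 */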
5245 restart:
5246 
5247 
5248 restart_nosblocks:
5249 	if (hold_sblock == 0) {
5250 		SOCKBUF_LOCK(&so->so_rcv);
5251 		hold_sblock = 1;
5252 	}
5253 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5254 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5255 		goto out;
5256 	}
5257 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5258 		if (so->so_error) {
5259 			error = so->so_error;
5260 			if ((in_flags & MSG_PEEK) == 0)
5261 				so->so_error = 0;
5262 			goto out;
5263 		} else {
5264 			if (so->so_rcv.sb_cc == 0) {
5265 				/* indicate EOF */
5266 				error = 0;
5267 				goto out;
5268 			}
5269 		}
5270 	}
5271 	if (so->so_rcv.sb_cc <= held_length) {
5272 		if (so->so_error) {
5273 			error = so->so_error;
5274 			if ((in_flags & MSG_PEEK) == 0) {
5275 				so->so_error = 0;
5276 			}
5277 			goto out;
5278 		}
5279 		if ((so->so_rcv.sb_cc == 0) &&
5280 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5281 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5282 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5283 				/*
5284 				 * For the active open side, clear the flags for
5285 				 * re-use; the passive open side is blocked by
5286 				 * connect.
5287 				 */
5288 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5289 					/* You were aborted, passive side
5290 					 * always hits here */
5291 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5292 					error = ECONNRESET;
5293 				}
5294 				so->so_state &= ~(SS_ISCONNECTING |
5295 				    SS_ISDISCONNECTING |
5296 				    SS_ISCONFIRMING |
5297 				    SS_ISCONNECTED);
5298 				if (error == 0) {
5299 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5300 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5301 						error = ENOTCONN;
5302 					}
5303 				}
5304 				goto out;
5305 			}
5306 		}
5307 		if (block_allowed) {
5308 			error = sbwait(&so->so_rcv);
5309 			if (error) {
5310 				goto out;
5311 			}
5312 			held_length = 0;
5313 			goto restart_nosblocks;
5314 		} else {
5315 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5316 			error = EWOULDBLOCK;
5317 			goto out;
5318 		}
5319 	}
5320 	if (hold_sblock == 1) {
5321 		SOCKBUF_UNLOCK(&so->so_rcv);
5322 		hold_sblock = 0;
5323 	}
5324 	/* we possibly have data we can read */
5325 	/* sa_ignore FREED_MEMORY */
5326 	control = TAILQ_FIRST(&inp->read_queue);
5327 	if (control == NULL) {
5328 		/*
5329 		 * This could happen since the appender did the
5330 		 * increment but has not yet done the tailq insert onto
5331 		 * the read_queue.
5332 		 */
5333 		if (hold_rlock == 0) {
5334 			SCTP_INP_READ_LOCK(inp);
5335 		}
5336 		control = TAILQ_FIRST(&inp->read_queue);
5337 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5338 #ifdef INVARIANTS
5339 			panic("Huh, its non zero and nothing on control?");
5340 #endif
5341 			so->so_rcv.sb_cc = 0;
5342 		}
5343 		SCTP_INP_READ_UNLOCK(inp);
5344 		hold_rlock = 0;
5345 		goto restart;
5346 	}
5347 	if ((control->length == 0) &&
5348 	    (control->do_not_ref_stcb)) {
5349 		/*
5350 		 * Clean-up code for freeing an assoc that left behind a
5351 		 * pdapi.. maybe a peer in EEOR mode that just closed after
5352 		 * sending and never indicated an EOR.
5353 		 */
5354 		if (hold_rlock == 0) {
5355 			hold_rlock = 1;
5356 			SCTP_INP_READ_LOCK(inp);
5357 		}
5358 		control->held_length = 0;
5359 		if (control->data) {
5360 			/* Hmm, there is data here.. fix it up */
5361 			struct mbuf *m_tmp;
5362 			int cnt = 0;
5363 
5364 			m_tmp = control->data;
5365 			while (m_tmp) {
5366 				cnt += SCTP_BUF_LEN(m_tmp);
5367 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5368 					control->tail_mbuf = m_tmp;
5369 					control->end_added = 1;
5370 				}
5371 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5372 			}
5373 			control->length = cnt;
5374 		} else {
5375 			/* remove it */
5376 			TAILQ_REMOVE(&inp->read_queue, control, next);
5377 			/* Add back any hidden data */
5378 			sctp_free_remote_addr(control->whoFrom);
5379 			sctp_free_a_readq(stcb, control);
5380 		}
5381 		if (hold_rlock) {
5382 			hold_rlock = 0;
5383 			SCTP_INP_READ_UNLOCK(inp);
5384 		}
5385 		goto restart;
5386 	}
5387 	if ((control->length == 0) &&
5388 	    (control->end_added == 1)) {
5389 		/* Do we also need to check for (control->pdapi_aborted ==
5390 		 * 1)? */
5391 		if (hold_rlock == 0) {
5392 			hold_rlock = 1;
5393 			SCTP_INP_READ_LOCK(inp);
5394 		}
5395 		TAILQ_REMOVE(&inp->read_queue, control, next);
5396 		if (control->data) {
5397 #ifdef INVARIANTS
5398 			panic("control->data not null but control->length == 0");
5399 #else
5400 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5401 			sctp_m_freem(control->data);
5402 			control->data = NULL;
5403 #endif
5404 		}
5405 		if (control->aux_data) {
5406 			sctp_m_free(control->aux_data);
5407 			control->aux_data = NULL;
5408 		}
5409 #ifdef INVARIANTS
5410 		if (control->on_strm_q) {
5411 			panic("About to free ctl:%p so:%p and its in %d",
5412 			    control, so, control->on_strm_q);
5413 		}
5414 #endif
5415 		sctp_free_remote_addr(control->whoFrom);
5416 		sctp_free_a_readq(stcb, control);
5417 		if (hold_rlock) {
5418 			hold_rlock = 0;
5419 			SCTP_INP_READ_UNLOCK(inp);
5420 		}
5421 		goto restart;
5422 	}
5423 	if (control->length == 0) {
5424 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5425 		    (filling_sinfo)) {
5426 			/* find a more suitable one than this */
5427 			ctl = TAILQ_NEXT(control, next);
5428 			while (ctl) {
5429 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5430 				    (ctl->some_taken ||
5431 				    (ctl->spec_flags & M_NOTIFICATION) ||
5432 				    ((ctl->do_not_ref_stcb == 0) &&
5433 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5434 				    ) {
5435 					/*-
5436 					 * If we have a different TCB next, and there is data
5437 					 * present: if we have already taken some (pdapi), OR we can
5438 					 * ref the tcb and no delivery has started on this stream, we
5439 					 * take it. Note we allow a notification on a different
5440 					 * assoc to be delivered..
5441 					 */
5442 					control = ctl;
5443 					goto found_one;
5444 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5445 					    (ctl->length) &&
5446 					    ((ctl->some_taken) ||
5447 					    ((ctl->do_not_ref_stcb == 0) &&
5448 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5449 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5450 					/*-
5451 					 * If we have the same tcb, and there is data present, and we
5452 					 * have the strm interleave feature present: then if we have
5453 					 * taken some (pdapi) or we can refer to that tcb AND we have
5454 					 * not started a delivery for this stream, we can take it.
5455 					 * Note we do NOT allow a notification on the same assoc to
5456 					 * be delivered.
5457 					 */
5458 					control = ctl;
5459 					goto found_one;
5460 				}
5461 				ctl = TAILQ_NEXT(ctl, next);
5462 			}
5463 		}
5464 		/*
5465 		 * if we reach here, no suitable replacement is available
5466 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5467 		 * into our held count, and it's time to sleep again.
5468 		 */
5469 		held_length = so->so_rcv.sb_cc;
5470 		control->held_length = so->so_rcv.sb_cc;
5471 		goto restart;
5472 	}
5473 	/* Clear the held length since there is something to read */
5474 	control->held_length = 0;
5475 found_one:
5476 	/*
5477 	 * If we reach here, control has some data for us to read off.
5478 	 * Note that stcb COULD be NULL.
5479 	 */
5480 	if (hold_rlock == 0) {
5481 		hold_rlock = 1;
5482 		SCTP_INP_READ_LOCK(inp);
5483 	}
5484 	control->some_taken++;
5485 	stcb = control->stcb;
5486 	if (stcb) {
5487 		if ((control->do_not_ref_stcb == 0) &&
5488 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5489 			if (freecnt_applied == 0)
5490 				stcb = NULL;
5491 		} else if (control->do_not_ref_stcb == 0) {
5492 			/* you can't free it on me please */
5493 			/*
5494 			 * The lock on the socket buffer protects us so the
5495 			 * free code will stop. But since we used the
5496 			 * socketbuf lock and the sender uses the tcb_lock
5497 			 * to increment, we need to use the atomic add to
5498 			 * the refcnt
5499 			 */
5500 			if (freecnt_applied) {
5501 #ifdef INVARIANTS
5502 				panic("refcnt already incremented");
5503 #else
5504 				SCTP_PRINTF("refcnt already incremented?\n");
5505 #endif
5506 			} else {
5507 				atomic_add_int(&stcb->asoc.refcnt, 1);
5508 				freecnt_applied = 1;
5509 			}
5510 			/*
5511 			 * Setup to remember how much we have not yet told
5512 			 * the peer our rwnd has opened up. Note we grab the
5513 			 * value from the tcb from last time. Note too that
5514 			 * sack sending clears this when a sack is sent,
5515 			 * which is fine. Once we hit the rwnd_req, we then
5516 			 * will go to the sctp_user_rcvd() that will not
5517 			 * lock until it KNOWs it MUST send a WUP-SACK.
5518 			 */
5519 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5520 			stcb->freed_by_sorcv_sincelast = 0;
5521 		}
5522 	}
5523 	if (stcb &&
5524 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5525 	    control->do_not_ref_stcb == 0) {
5526 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5527 	}
5528 	/* First lets get off the sinfo and sockaddr info */
5529 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5530 		sinfo->sinfo_stream = control->sinfo_stream;
5531 		sinfo->sinfo_ssn = (uint16_t) control->mid;
5532 		sinfo->sinfo_flags = control->sinfo_flags;
5533 		sinfo->sinfo_ppid = control->sinfo_ppid;
5534 		sinfo->sinfo_context = control->sinfo_context;
5535 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5536 		sinfo->sinfo_tsn = control->sinfo_tsn;
5537 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5538 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5539 		nxt = TAILQ_NEXT(control, next);
5540 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5541 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5542 			struct sctp_extrcvinfo *s_extra;
5543 
5544 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5545 			if ((nxt) &&
5546 			    (nxt->length)) {
5547 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5548 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5549 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5550 				}
5551 				if (nxt->spec_flags & M_NOTIFICATION) {
5552 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5553 				}
5554 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5555 				s_extra->serinfo_next_length = nxt->length;
5556 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5557 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5558 				if (nxt->tail_mbuf != NULL) {
5559 					if (nxt->end_added) {
5560 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5561 					}
5562 				}
5563 			} else {
5564 				/*
5565 				 * we explicitly zero these, since the memcpy
5566 				 * may have picked up other things beyond the
5567 				 * older sinfo_ fields that are on the control
5568 				 * structure.
5569 				 */
5570 				nxt = NULL;
5571 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5572 				s_extra->serinfo_next_aid = 0;
5573 				s_extra->serinfo_next_length = 0;
5574 				s_extra->serinfo_next_ppid = 0;
5575 				s_extra->serinfo_next_stream = 0;
5576 			}
5577 		}
5578 		/*
5579 		 * update off the real current cum-ack, if we have an stcb.
5580 		 */
5581 		if ((control->do_not_ref_stcb == 0) && stcb)
5582 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5583 		/*
5584 		 * mask off the high bits, we keep the actual chunk bits in
5585 		 * there.
5586 		 */
5587 		sinfo->sinfo_flags &= 0x00ff;
5588 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5589 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5590 		}
5591 	}
5592 #ifdef SCTP_ASOCLOG_OF_TSNS
5593 	{
5594 		int index, newindex;
5595 		struct sctp_pcbtsn_rlog *entry;
5596 
5597 		do {
5598 			index = inp->readlog_index;
5599 			newindex = index + 1;
5600 			if (newindex >= SCTP_READ_LOG_SIZE) {
5601 				newindex = 0;
5602 			}
5603 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5604 		entry = &inp->readlog[index];
5605 		entry->vtag = control->sinfo_assoc_id;
5606 		entry->strm = control->sinfo_stream;
5607 		entry->seq = (uint16_t) control->mid;
5608 		entry->sz = control->length;
5609 		entry->flgs = control->sinfo_flags;
5610 	}
5611 #endif
5612 	if ((fromlen > 0) && (from != NULL)) {
5613 		union sctp_sockstore store;
5614 		size_t len;
5615 
5616 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5617 #ifdef INET6
5618 		case AF_INET6:
5619 			len = sizeof(struct sockaddr_in6);
5620 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5621 			store.sin6.sin6_port = control->port_from;
5622 			break;
5623 #endif
5624 #ifdef INET
5625 		case AF_INET:
5626 #ifdef INET6
5627 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5628 				len = sizeof(struct sockaddr_in6);
5629 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5630 				    &store.sin6);
5631 				store.sin6.sin6_port = control->port_from;
5632 			} else {
5633 				len = sizeof(struct sockaddr_in);
5634 				store.sin = control->whoFrom->ro._l_addr.sin;
5635 				store.sin.sin_port = control->port_from;
5636 			}
5637 #else
5638 			len = sizeof(struct sockaddr_in);
5639 			store.sin = control->whoFrom->ro._l_addr.sin;
5640 			store.sin.sin_port = control->port_from;
5641 #endif
5642 			break;
5643 #endif
5644 		default:
5645 			len = 0;
5646 			break;
5647 		}
5648 		memcpy(from, &store, min((size_t)fromlen, len));
5649 #ifdef INET6
5650 		{
5651 			struct sockaddr_in6 lsa6, *from6;
5652 
5653 			from6 = (struct sockaddr_in6 *)from;
5654 			sctp_recover_scope_mac(from6, (&lsa6));
5655 		}
5656 #endif
5657 	}
5658 	if (hold_rlock) {
5659 		SCTP_INP_READ_UNLOCK(inp);
5660 		hold_rlock = 0;
5661 	}
5662 	if (hold_sblock) {
5663 		SOCKBUF_UNLOCK(&so->so_rcv);
5664 		hold_sblock = 0;
5665 	}
5666 	/* now copy out what data we can */
5667 	if (mp == NULL) {
5668 		/* copy out each mbuf in the chain up to length */
5669 get_more_data:
5670 		m = control->data;
5671 		while (m) {
5672 			/* Move out all we can */
5673 			cp_len = (int)uio->uio_resid;
5674 			my_len = (int)SCTP_BUF_LEN(m);
5675 			if (cp_len > my_len) {
5676 				/* not enough in this buf */
5677 				cp_len = my_len;
5678 			}
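			/*
			 * cp_len is now the number of bytes to move out of
			 * this mbuf: the smaller of what the user still wants
			 * and what the mbuf holds.
			 */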
5679 			if (hold_rlock) {
5680 				SCTP_INP_READ_UNLOCK(inp);
5681 				hold_rlock = 0;
5682 			}
5683 			if (cp_len > 0)
5684 				error = uiomove(mtod(m, char *), cp_len, uio);
5685 			/* re-read */
5686 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5687 				goto release;
5688 			}
5689 			if ((control->do_not_ref_stcb == 0) && stcb &&
5690 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5691 				no_rcv_needed = 1;
5692 			}
5693 			if (error) {
5694 				/* error we are out of here */
5695 				goto release;
5696 			}
5697 			SCTP_INP_READ_LOCK(inp);
5698 			hold_rlock = 1;
5699 			if (cp_len == SCTP_BUF_LEN(m)) {
5700 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5701 				    (control->end_added)) {
5702 					out_flags |= MSG_EOR;
5703 					if ((control->do_not_ref_stcb == 0) &&
5704 					    (control->stcb != NULL) &&
5705 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5706 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5707 				}
5708 				if (control->spec_flags & M_NOTIFICATION) {
5709 					out_flags |= MSG_NOTIFICATION;
5710 				}
5711 				/* we ate up the mbuf */
5712 				if (in_flags & MSG_PEEK) {
5713 					/* just looking */
5714 					m = SCTP_BUF_NEXT(m);
5715 					copied_so_far += cp_len;
5716 				} else {
5717 					/* dispose of the mbuf */
5718 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5719 						sctp_sblog(&so->so_rcv,
5720 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5721 					}
5722 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5723 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5724 						sctp_sblog(&so->so_rcv,
5725 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5726 					}
5727 					copied_so_far += cp_len;
5728 					freed_so_far += cp_len;
5729 					freed_so_far += MSIZE;
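					/*
					 * MSIZE is counted as well, presumably to
					 * account for the mbuf overhead that
					 * sctp_sbfree() releases along with the
					 * data bytes.
					 */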
5730 					atomic_subtract_int(&control->length, cp_len);
5731 					control->data = sctp_m_free(m);
5732 					m = control->data;
5733 					/* been through it all; we must hold the sb
5734 					 * lock, so it is OK to null the tail */
5735 					if (control->data == NULL) {
5736 #ifdef INVARIANTS
5737 						if ((control->end_added == 0) ||
5738 						    (TAILQ_NEXT(control, next) == NULL)) {
5739 							/*
5740 							 * If the end is not
5741 							 * added, OR the
5742 							 * next is NOT null
5743 							 * we MUST have the
5744 							 * lock.
5745 							 */
5746 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5747 								panic("Hmm we don't own the lock?");
5748 							}
5749 						}
5750 #endif
5751 						control->tail_mbuf = NULL;
5752 #ifdef INVARIANTS
5753 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5754 							panic("end_added, nothing left and no MSG_EOR");
5755 						}
5756 #endif
5757 					}
5758 				}
5759 			} else {
5760 				/* Do we need to trim the mbuf? */
5761 				if (control->spec_flags & M_NOTIFICATION) {
5762 					out_flags |= MSG_NOTIFICATION;
5763 				}
5764 				if ((in_flags & MSG_PEEK) == 0) {
5765 					SCTP_BUF_RESV_UF(m, cp_len);
5766 					SCTP_BUF_LEN(m) -= cp_len;
5767 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5768 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5769 					}
5770 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5771 					if ((control->do_not_ref_stcb == 0) &&
5772 					    stcb) {
5773 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5774 					}
5775 					copied_so_far += cp_len;
5776 					freed_so_far += cp_len;
5777 					freed_so_far += MSIZE;
5778 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5779 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5780 						    SCTP_LOG_SBRESULT, 0);
5781 					}
5782 					atomic_subtract_int(&control->length, cp_len);
5783 				} else {
5784 					copied_so_far += cp_len;
5785 				}
5786 			}
5787 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5788 				break;
5789 			}
5790 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5791 			    (control->do_not_ref_stcb == 0) &&
5792 			    (freed_so_far >= rwnd_req)) {
5793 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5794 			}
5795 		}		/* end while(m) */
5796 		/*
5797 		 * At this point we have looked at it all and we either have
5798 		 * a MSG_EOR/or read all the user wants... <OR>
5799 		 * control->length == 0.
5800 		 */
5801 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5802 			/* we are done with this control */
5803 			if (control->length == 0) {
5804 				if (control->data) {
5805 #ifdef INVARIANTS
5806 					panic("control->data not null at read eor?");
5807 #else
5808 					SCTP_PRINTF("Strange, data left in the control buffer.. invariants would panic?\n");
5809 					sctp_m_freem(control->data);
5810 					control->data = NULL;
5811 #endif
5812 				}
5813 		done_with_control:
5814 				if (hold_rlock == 0) {
5815 					SCTP_INP_READ_LOCK(inp);
5816 					hold_rlock = 1;
5817 				}
5818 				TAILQ_REMOVE(&inp->read_queue, control, next);
5819 				/* Add back any hidden data */
5820 				if (control->held_length) {
5821 					held_length = 0;
5822 					control->held_length = 0;
5823 					wakeup_read_socket = 1;
5824 				}
5825 				if (control->aux_data) {
5826 					sctp_m_free(control->aux_data);
5827 					control->aux_data = NULL;
5828 				}
5829 				no_rcv_needed = control->do_not_ref_stcb;
5830 				sctp_free_remote_addr(control->whoFrom);
5831 				control->data = NULL;
5832 #ifdef INVARIANTS
5833 				if (control->on_strm_q) {
5834 					panic("About to free ctl:%p so:%p and its in %d",
5835 					    control, so, control->on_strm_q);
5836 				}
5837 #endif
5838 				sctp_free_a_readq(stcb, control);
5839 				control = NULL;
5840 				if ((freed_so_far >= rwnd_req) &&
5841 				    (no_rcv_needed == 0))
5842 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5843 
5844 			} else {
5845 				/*
5846 				 * The user did not read all of this
5847 				 * message, turn off the returned MSG_EOR
5848 				 * since we are leaving more behind on the
5849 				 * control to read.
5850 				 */
5851 #ifdef INVARIANTS
5852 				if (control->end_added &&
5853 				    (control->data == NULL) &&
5854 				    (control->tail_mbuf == NULL)) {
5855 					panic("Gak, control->length is corrupt?");
5856 				}
5857 #endif
5858 				no_rcv_needed = control->do_not_ref_stcb;
5859 				out_flags &= ~MSG_EOR;
5860 			}
5861 		}
5862 		if (out_flags & MSG_EOR) {
5863 			goto release;
5864 		}
5865 		if ((uio->uio_resid == 0) ||
5866 		    ((in_eeor_mode) &&
5867 		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5868 			goto release;
5869 		}
5870 		/*
5871 		 * If I hit here the receiver wants more and this message is
5872 		 * NOT done (pd-api). So two questions: can we block? If not,
5873 		 * we are done. Did the user NOT set MSG_WAITALL?
5874 		 */
5875 		if (block_allowed == 0) {
5876 			goto release;
5877 		}
5878 		/*
5879 		 * We need to wait for more data.  A few things:
5880 		 * - We don't sbunlock() so we don't get someone else reading.
5881 		 * - We must be sure to account for the case where what is
5882 		 *   added is NOT to our control when we wake up.
5883 		 */
5884 
5885 		/*
5886 		 * Do we need to tell the transport a rwnd update might be
5887 		 * needed before we go to sleep?
5888 		 */
5889 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5890 		    ((freed_so_far >= rwnd_req) &&
5891 		    (control->do_not_ref_stcb == 0) &&
5892 		    (no_rcv_needed == 0))) {
5893 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5894 		}
5895 wait_some_more:
5896 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5897 			goto release;
5898 		}
5899 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5900 			goto release;
5901 
5902 		if (hold_rlock == 1) {
5903 			SCTP_INP_READ_UNLOCK(inp);
5904 			hold_rlock = 0;
5905 		}
5906 		if (hold_sblock == 0) {
5907 			SOCKBUF_LOCK(&so->so_rcv);
5908 			hold_sblock = 1;
5909 		}
5910 		if ((copied_so_far) && (control->length == 0) &&
5911 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5912 			goto release;
5913 		}
5914 		if (so->so_rcv.sb_cc <= control->held_length) {
5915 			error = sbwait(&so->so_rcv);
5916 			if (error) {
5917 				goto release;
5918 			}
5919 			control->held_length = 0;
5920 		}
5921 		if (hold_sblock) {
5922 			SOCKBUF_UNLOCK(&so->so_rcv);
5923 			hold_sblock = 0;
5924 		}
5925 		if (control->length == 0) {
5926 			/* still nothing here */
5927 			if (control->end_added == 1) {
5928 				/* the peer aborted, or is done, i.e. did a shutdown */
5929 				out_flags |= MSG_EOR;
5930 				if (control->pdapi_aborted) {
5931 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5932 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5933 
5934 					out_flags |= MSG_TRUNC;
5935 				} else {
5936 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5937 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5938 				}
5939 				goto done_with_control;
5940 			}
5941 			if (so->so_rcv.sb_cc > held_length) {
5942 				control->held_length = so->so_rcv.sb_cc;
5943 				held_length = 0;
5944 			}
5945 			goto wait_some_more;
5946 		} else if (control->data == NULL) {
5947 			/*
5948 			 * we must re-sync since data is probably being
5949 			 * added
5950 			 */
5951 			SCTP_INP_READ_LOCK(inp);
5952 			if ((control->length > 0) && (control->data == NULL)) {
5953 				/* big trouble.. we have the lock and it's
5954 				 * corrupt? */
5955 #ifdef INVARIANTS
5956 				panic("Impossible data==NULL length !=0");
5957 #endif
5958 				out_flags |= MSG_EOR;
5959 				out_flags |= MSG_TRUNC;
5960 				control->length = 0;
5961 				SCTP_INP_READ_UNLOCK(inp);
5962 				goto done_with_control;
5963 			}
5964 			SCTP_INP_READ_UNLOCK(inp);
5965 			/* We will fall through to get more data */
5966 		}
5967 		goto get_more_data;
5968 	} else {
5969 		/*-
5970 		 * Give caller back the mbuf chain,
5971 		 * store in uio_resid the length
5972 		 */
5973 		wakeup_read_socket = 0;
5974 		if ((control->end_added == 0) ||
5975 		    (TAILQ_NEXT(control, next) == NULL)) {
5976 			/* Need to get rlock */
5977 			if (hold_rlock == 0) {
5978 				SCTP_INP_READ_LOCK(inp);
5979 				hold_rlock = 1;
5980 			}
5981 		}
5982 		if (control->end_added) {
5983 			out_flags |= MSG_EOR;
5984 			if ((control->do_not_ref_stcb == 0) &&
5985 			    (control->stcb != NULL) &&
5986 			    ((control->spec_flags & M_NOTIFICATION) == 0))
5987 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5988 		}
5989 		if (control->spec_flags & M_NOTIFICATION) {
5990 			out_flags |= MSG_NOTIFICATION;
5991 		}
5992 		uio->uio_resid = control->length;
5993 		*mp = control->data;
5994 		m = control->data;
5995 		while (m) {
5996 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5997 				sctp_sblog(&so->so_rcv,
5998 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5999 			}
6000 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6001 			freed_so_far += SCTP_BUF_LEN(m);
6002 			freed_so_far += MSIZE;
6003 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6004 				sctp_sblog(&so->so_rcv,
6005 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6006 			}
6007 			m = SCTP_BUF_NEXT(m);
6008 		}
6009 		control->data = control->tail_mbuf = NULL;
6010 		control->length = 0;
6011 		if (out_flags & MSG_EOR) {
6012 			/* Done with this control */
6013 			goto done_with_control;
6014 		}
6015 	}
6016 release:
6017 	if (hold_rlock == 1) {
6018 		SCTP_INP_READ_UNLOCK(inp);
6019 		hold_rlock = 0;
6020 	}
6021 	if (hold_sblock == 1) {
6022 		SOCKBUF_UNLOCK(&so->so_rcv);
6023 		hold_sblock = 0;
6024 	}
6025 	sbunlock(&so->so_rcv);
6026 	sockbuf_lock = 0;
6027 
6028 release_unlocked:
6029 	if (hold_sblock) {
6030 		SOCKBUF_UNLOCK(&so->so_rcv);
6031 		hold_sblock = 0;
6032 	}
6033 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6034 		if ((freed_so_far >= rwnd_req) &&
6035 		    (control && (control->do_not_ref_stcb == 0)) &&
6036 		    (no_rcv_needed == 0))
6037 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6038 	}
6039 out:
6040 	if (msg_flags) {
6041 		*msg_flags = out_flags;
6042 	}
6043 	if (((out_flags & MSG_EOR) == 0) &&
6044 	    ((in_flags & MSG_PEEK) == 0) &&
6045 	    (sinfo) &&
6046 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6047 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6048 		struct sctp_extrcvinfo *s_extra;
6049 
6050 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6051 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6052 	}
6053 	if (hold_rlock == 1) {
6054 		SCTP_INP_READ_UNLOCK(inp);
6055 	}
6056 	if (hold_sblock) {
6057 		SOCKBUF_UNLOCK(&so->so_rcv);
6058 	}
6059 	if (sockbuf_lock) {
6060 		sbunlock(&so->so_rcv);
6061 	}
6062 	if (freecnt_applied) {
6063 		/*
6064 		 * The lock on the socket buffer protects us so the free
6065 		 * code will stop. But since we used the socketbuf lock and
6066 		 * the sender uses the tcb_lock to increment, we need to use
6067 		 * the atomic add to the refcnt.
6068 		 */
6069 		if (stcb == NULL) {
6070 #ifdef INVARIANTS
6071 			panic("stcb for refcnt has gone NULL?");
6072 			goto stage_left;
6073 #else
6074 			goto stage_left;
6075 #endif
6076 		}
6077 		/* Save the value back for next time */
6078 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6079 		atomic_add_int(&stcb->asoc.refcnt, -1);
6080 	}
6081 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6082 		if (stcb) {
6083 			sctp_misc_ints(SCTP_SORECV_DONE,
6084 			    freed_so_far,
6085 			    (uint32_t) ((uio) ? (slen - uio->uio_resid) : slen),
6086 			    stcb->asoc.my_rwnd,
6087 			    so->so_rcv.sb_cc);
6088 		} else {
6089 			sctp_misc_ints(SCTP_SORECV_DONE,
6090 			    freed_so_far,
6091 			    (uint32_t) ((uio) ? (slen - uio->uio_resid) : slen),
6092 			    0,
6093 			    so->so_rcv.sb_cc);
6094 		}
6095 	}
6096 stage_left:
6097 	if (wakeup_read_socket) {
6098 		sctp_sorwakeup(inp, so);
6099 	}
6100 	return (error);
6101 }
6102 
6103 
6104 #ifdef SCTP_MBUF_LOGGING
6105 struct mbuf *
6106 sctp_m_free(struct mbuf *m)
6107 {
6108 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6109 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6110 	}
6111 	return (m_free(m));
6112 }
6113 
6114 void
6115 sctp_m_freem(struct mbuf *mb)
6116 {
6117 	while (mb != NULL)
6118 		mb = sctp_m_free(mb);
6119 }
6120 
6121 #endif
6122 
6123 int
6124 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6125 {
6126 	/*
6127 	 * Given a local address. For all associations that holds the
6128 	 * address, request a peer-set-primary.
6129 	 */
6130 	struct sctp_ifa *ifa;
6131 	struct sctp_laddr *wi;
6132 
6133 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6134 	if (ifa == NULL) {
6135 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6136 		return (EADDRNOTAVAIL);
6137 	}
6138 	/*
6139 	 * Now that we have the ifa we must awaken the iterator with this
6140 	 * message.
6141 	 */
6142 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6143 	if (wi == NULL) {
6144 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6145 		return (ENOMEM);
6146 	}
6147 	/* Now incr the count and init the wi structure */
6148 	SCTP_INCR_LADDR_COUNT();
6149 	bzero(wi, sizeof(*wi));
6150 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6151 	wi->ifa = ifa;
6152 	wi->action = SCTP_SET_PRIM_ADDR;
6153 	atomic_add_int(&ifa->refcount, 1);
6154 
6155 	/* Now add it to the work queue */
6156 	SCTP_WQ_ADDR_LOCK();
6157 	/*
6158 	 * Should this really be a tailq? As it is we will process the
6159 	 * newest first :-0
6160 	 */
6161 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6162 	SCTP_WQ_ADDR_UNLOCK();
6163 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6164 	    (struct sctp_inpcb *)NULL,
6165 	    (struct sctp_tcb *)NULL,
6166 	    (struct sctp_nets *)NULL);
6167 	return (0);
6168 }
6169 
6170 
6171 int
6172 sctp_soreceive(struct socket *so,
6173     struct sockaddr **psa,
6174     struct uio *uio,
6175     struct mbuf **mp0,
6176     struct mbuf **controlp,
6177     int *flagsp)
6178 {
6179 	int error, fromlen;
6180 	uint8_t sockbuf[256];
6181 	struct sockaddr *from;
6182 	struct sctp_extrcvinfo sinfo;
6183 	int filling_sinfo = 1;
6184 	struct sctp_inpcb *inp;
6185 
6186 	inp = (struct sctp_inpcb *)so->so_pcb;
6187 	/* pick up the endpoint we are reading from */
6188 	if (inp == NULL) {
6189 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6190 		return (EINVAL);
6191 	}
6192 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6193 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6194 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6195 	    (controlp == NULL)) {
6196 		/* user does not want the sndrcv ctl */
6197 		filling_sinfo = 0;
6198 	}
6199 	if (psa) {
6200 		from = (struct sockaddr *)sockbuf;
6201 		fromlen = sizeof(sockbuf);
6202 		from->sa_len = 0;
6203 	} else {
6204 		from = NULL;
6205 		fromlen = 0;
6206 	}
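	/*
	 * 'sockbuf' above is just stack scratch space large enough for any
	 * sockaddr; sctp_sorecvmsg() fills in the source address and we
	 * duplicate it for the caller below.
	 */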
6207 
6208 	if (filling_sinfo) {
6209 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6210 	}
6211 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6212 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6213 	if (controlp != NULL) {
6214 		/* copy back the sinfo in a CMSG format */
6215 		if (filling_sinfo)
6216 			*controlp = sctp_build_ctl_nchunk(inp,
6217 			    (struct sctp_sndrcvinfo *)&sinfo);
6218 		else
6219 			*controlp = NULL;
6220 	}
6221 	if (psa) {
6222 		/* copy back the address info */
6223 		if (from && from->sa_len) {
6224 			*psa = sodupsockaddr(from, M_NOWAIT);
6225 		} else {
6226 			*psa = NULL;
6227 		}
6228 	}
6229 	return (error);
6230 }
6231 
6232 
6233 
6234 
6235 
6236 int
6237 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6238     int totaddr, int *error)
6239 {
6240 	int added = 0;
6241 	int i;
6242 	struct sctp_inpcb *inp;
6243 	struct sockaddr *sa;
6244 	size_t incr = 0;
6245 #ifdef INET
6246 	struct sockaddr_in *sin;
6247 #endif
6248 #ifdef INET6
6249 	struct sockaddr_in6 *sin6;
6250 #endif
6251 
6252 	sa = addr;
6253 	inp = stcb->sctp_ep;
6254 	*error = 0;
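	/*
	 * 'addr' is expected to be a packed array of sockaddrs; walk it,
	 * adding each address to the association and stepping forward by
	 * the size of the entry just handled.
	 */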
6255 	for (i = 0; i < totaddr; i++) {
6256 		switch (sa->sa_family) {
6257 #ifdef INET
6258 		case AF_INET:
6259 			incr = sizeof(struct sockaddr_in);
6260 			sin = (struct sockaddr_in *)sa;
6261 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6262 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6263 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6264 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6265 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6266 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6267 				*error = EINVAL;
6268 				goto out_now;
6269 			}
6270 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6271 			    SCTP_DONOT_SETSCOPE,
6272 			    SCTP_ADDR_IS_CONFIRMED)) {
6273 				/* assoc gone, no unlock needed */
6274 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6275 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6276 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6277 				*error = ENOBUFS;
6278 				goto out_now;
6279 			}
6280 			added++;
6281 			break;
6282 #endif
6283 #ifdef INET6
6284 		case AF_INET6:
6285 			incr = sizeof(struct sockaddr_in6);
6286 			sin6 = (struct sockaddr_in6 *)sa;
6287 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6288 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6289 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6290 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6291 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6292 				*error = EINVAL;
6293 				goto out_now;
6294 			}
6295 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6296 			    SCTP_DONOT_SETSCOPE,
6297 			    SCTP_ADDR_IS_CONFIRMED)) {
6298 				/* assoc gone, no unlock needed */
6299 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6300 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6301 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6302 				*error = ENOBUFS;
6303 				goto out_now;
6304 			}
6305 			added++;
6306 			break;
6307 #endif
6308 		default:
6309 			break;
6310 		}
6311 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6312 	}
6313 out_now:
6314 	return (added);
6315 }
6316 
6317 struct sctp_tcb *
6318 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6319     unsigned int *totaddr,
6320     unsigned int *num_v4, unsigned int *num_v6, int *error,
6321     unsigned int limit, int *bad_addr)
6322 {
6323 	struct sockaddr *sa;
6324 	struct sctp_tcb *stcb = NULL;
6325 	unsigned int incr, at, i;
6326 
6327 	at = 0;
6328 	sa = addr;
6329 	*error = *num_v6 = *num_v4 = 0;
6330 	/* account and validate addresses */
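	/*
	 * Walk the packed sockaddr list: count and sanity-check each v4/v6
	 * entry, and if any address already maps to an association on this
	 * endpoint, return that association instead of continuing.
	 */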
6331 	for (i = 0; i < *totaddr; i++) {
6332 		switch (sa->sa_family) {
6333 #ifdef INET
6334 		case AF_INET:
6335 			incr = (unsigned int)sizeof(struct sockaddr_in);
6336 			if (sa->sa_len != incr) {
6337 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6338 				*error = EINVAL;
6339 				*bad_addr = 1;
6340 				return (NULL);
6341 			}
6342 			(*num_v4) += 1;
6343 			break;
6344 #endif
6345 #ifdef INET6
6346 		case AF_INET6:
6347 			{
6348 				struct sockaddr_in6 *sin6;
6349 
6350 				sin6 = (struct sockaddr_in6 *)sa;
6351 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6352 					/* Must be non-mapped for connectx */
6353 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6354 					*error = EINVAL;
6355 					*bad_addr = 1;
6356 					return (NULL);
6357 				}
6358 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6359 				if (sa->sa_len != incr) {
6360 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6361 					*error = EINVAL;
6362 					*bad_addr = 1;
6363 					return (NULL);
6364 				}
6365 				(*num_v6) += 1;
6366 				break;
6367 			}
6368 #endif
6369 		default:
6370 			*totaddr = i;
6371 			incr = 0;
6372 			/* we are done */
6373 			break;
6374 		}
6375 		if (i == *totaddr) {
6376 			break;
6377 		}
6378 		SCTP_INP_INCR_REF(inp);
6379 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6380 		if (stcb != NULL) {
6381 			/* Already have or am bringing up an association */
6382 			return (stcb);
6383 		} else {
6384 			SCTP_INP_DECR_REF(inp);
6385 		}
6386 		if ((at + incr) > limit) {
6387 			*totaddr = i;
6388 			break;
6389 		}
6390 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6391 	}
6392 	return ((struct sctp_tcb *)NULL);
6393 }
6394 
6395 /*
6396  * sctp_bindx(ADD) for one address.
6397  * assumes all arguments are valid/checked by caller.
6398  */
6399 void
6400 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6401     struct sockaddr *sa, sctp_assoc_t assoc_id,
6402     uint32_t vrf_id, int *error, void *p)
6403 {
6404 	struct sockaddr *addr_touse;
6405 #if defined(INET) && defined(INET6)
6406 	struct sockaddr_in sin;
6407 #endif
6408 
6409 	/* see if we're bound all already! */
6410 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6411 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6412 		*error = EINVAL;
6413 		return;
6414 	}
6415 	addr_touse = sa;
6416 #ifdef INET6
6417 	if (sa->sa_family == AF_INET6) {
6418 #ifdef INET
6419 		struct sockaddr_in6 *sin6;
6420 
6421 #endif
6422 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6423 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6424 			*error = EINVAL;
6425 			return;
6426 		}
6427 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6428 			/* can only bind v6 on PF_INET6 sockets */
6429 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6430 			*error = EINVAL;
6431 			return;
6432 		}
6433 #ifdef INET
6434 		sin6 = (struct sockaddr_in6 *)addr_touse;
6435 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6436 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6437 			    SCTP_IPV6_V6ONLY(inp)) {
6438 				/* can't bind v4-mapped on PF_INET sockets */
6439 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6440 				*error = EINVAL;
6441 				return;
6442 			}
6443 			in6_sin6_2_sin(&sin, sin6);
6444 			addr_touse = (struct sockaddr *)&sin;
6445 		}
6446 #endif
6447 	}
6448 #endif
6449 #ifdef INET
6450 	if (sa->sa_family == AF_INET) {
6451 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6452 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6453 			*error = EINVAL;
6454 			return;
6455 		}
6456 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6457 		    SCTP_IPV6_V6ONLY(inp)) {
6458 			/* can't bind v4 on PF_INET sockets */
6459 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6460 			*error = EINVAL;
6461 			return;
6462 		}
6463 	}
6464 #endif
6465 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6466 		if (p == NULL) {
6467 			/* Can't get proc for Net/Open BSD */
6468 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6469 			*error = EINVAL;
6470 			return;
6471 		}
6472 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6473 		return;
6474 	}
6475 	/*
6476 	 * No locks required here since bind and mgmt_ep_sa all do their own
6477 	 * locking. If we do something for the FIX: below we may need to
6478 	 * lock in that case.
6479 	 */
6480 	if (assoc_id == 0) {
6481 		/* add the address */
6482 		struct sctp_inpcb *lep;
6483 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6484 
6485 		/* validate the incoming port */
6486 		if ((lsin->sin_port != 0) &&
6487 		    (lsin->sin_port != inp->sctp_lport)) {
6488 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6489 			*error = EINVAL;
6490 			return;
6491 		} else {
6492 			/* user specified 0 port, set it to existing port */
6493 			lsin->sin_port = inp->sctp_lport;
6494 		}
6495 
6496 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6497 		if (lep != NULL) {
6498 			/*
6499 			 * We must decrement the refcount since we have the
6500 			 * ep already and are binding. No remove going on
6501 			 * here.
6502 			 */
6503 			SCTP_INP_DECR_REF(lep);
6504 		}
6505 		if (lep == inp) {
6506 			/* already bound to it.. ok */
6507 			return;
6508 		} else if (lep == NULL) {
6509 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6510 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6511 			    SCTP_ADD_IP_ADDRESS,
6512 			    vrf_id, NULL);
6513 		} else {
6514 			*error = EADDRINUSE;
6515 		}
6516 		if (*error)
6517 			return;
6518 	} else {
6519 		/*
6520 		 * FIX: decide whether we allow assoc based bindx
6521 		 */
6522 	}
6523 }
6524 
6525 /*
6526  * sctp_bindx(DELETE) for one address.
6527  * assumes all arguments are valid/checked by caller.
6528  */
6529 void
6530 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6531     struct sockaddr *sa, sctp_assoc_t assoc_id,
6532     uint32_t vrf_id, int *error)
6533 {
6534 	struct sockaddr *addr_touse;
6535 #if defined(INET) && defined(INET6)
6536 	struct sockaddr_in sin;
6537 #endif
6538 
6539 	/* see if we're bound all already! */
6540 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6541 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6542 		*error = EINVAL;
6543 		return;
6544 	}
6545 	addr_touse = sa;
6546 #ifdef INET6
6547 	if (sa->sa_family == AF_INET6) {
6548 #ifdef INET
6549 		struct sockaddr_in6 *sin6;
6550 #endif
6551 
6552 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6553 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6554 			*error = EINVAL;
6555 			return;
6556 		}
6557 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6558 			/* can only bind v6 on PF_INET6 sockets */
6559 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6560 			*error = EINVAL;
6561 			return;
6562 		}
6563 #ifdef INET
6564 		sin6 = (struct sockaddr_in6 *)addr_touse;
6565 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6566 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6567 			    SCTP_IPV6_V6ONLY(inp)) {
6568 				/* can't bind mapped-v4 on PF_INET sockets */
6569 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6570 				*error = EINVAL;
6571 				return;
6572 			}
6573 			in6_sin6_2_sin(&sin, sin6);
6574 			addr_touse = (struct sockaddr *)&sin;
6575 		}
6576 #endif
6577 	}
6578 #endif
6579 #ifdef INET
6580 	if (sa->sa_family == AF_INET) {
6581 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6582 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6583 			*error = EINVAL;
6584 			return;
6585 		}
6586 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6587 		    SCTP_IPV6_V6ONLY(inp)) {
6588 			/* can't bind v4 on PF_INET sockets */
6589 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6590 			*error = EINVAL;
6591 			return;
6592 		}
6593 	}
6594 #endif
6595 	/*
6596 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6597 	 * below is ever changed we may need to lock before calling
6598 	 * association level binding.
6599 	 */
6600 	if (assoc_id == 0) {
6601 		/* delete the address */
6602 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6603 		    SCTP_DEL_IP_ADDRESS,
6604 		    vrf_id, NULL);
6605 	} else {
6606 		/*
6607 		 * FIX: decide whether we allow assoc based bindx
6608 		 */
6609 	}
6610 }
6611 
6612 /*
6613  * returns the valid local address count for an assoc, taking into account
6614  * all scoping rules
6615  */
6616 int
6617 sctp_local_addr_count(struct sctp_tcb *stcb)
6618 {
6619 	int loopback_scope;
6620 #if defined(INET)
6621 	int ipv4_local_scope, ipv4_addr_legal;
6622 #endif
6623 #if defined (INET6)
6624 	int local_scope, site_scope, ipv6_addr_legal;
6625 #endif
6626 	struct sctp_vrf *vrf;
6627 	struct sctp_ifn *sctp_ifn;
6628 	struct sctp_ifa *sctp_ifa;
6629 	int count = 0;
6630 
6631 	/* Turn on all the appropriate scopes */
6632 	loopback_scope = stcb->asoc.scope.loopback_scope;
6633 #if defined(INET)
6634 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6635 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6636 #endif
6637 #if defined(INET6)
6638 	local_scope = stcb->asoc.scope.local_scope;
6639 	site_scope = stcb->asoc.scope.site_scope;
6640 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6641 #endif
6642 	SCTP_IPI_ADDR_RLOCK();
6643 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6644 	if (vrf == NULL) {
6645 		/* no vrf, no addresses */
6646 		SCTP_IPI_ADDR_RUNLOCK();
6647 		return (0);
6648 	}
6649 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6650 		/*
6651 		 * bound all case: go through all ifns on the vrf
6652 		 */
6653 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6654 			if ((loopback_scope == 0) &&
6655 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6656 				continue;
6657 			}
6658 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6659 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6660 					continue;
6661 				switch (sctp_ifa->address.sa.sa_family) {
6662 #ifdef INET
6663 				case AF_INET:
6664 					if (ipv4_addr_legal) {
6665 						struct sockaddr_in *sin;
6666 
6667 						sin = &sctp_ifa->address.sin;
6668 						if (sin->sin_addr.s_addr == 0) {
6669 							/* skip unspecified
6670 							 * addrs */
6671 							continue;
6672 						}
6673 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6674 						    &sin->sin_addr) != 0) {
6675 							continue;
6676 						}
6677 						if ((ipv4_local_scope == 0) &&
6678 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6679 							continue;
6680 						}
6681 						/* count this one */
6682 						count++;
6683 					} else {
6684 						continue;
6685 					}
6686 					break;
6687 #endif
6688 #ifdef INET6
6689 				case AF_INET6:
6690 					if (ipv6_addr_legal) {
6691 						struct sockaddr_in6 *sin6;
6692 
6693 						sin6 = &sctp_ifa->address.sin6;
6694 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6695 							continue;
6696 						}
6697 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6698 						    &sin6->sin6_addr) != 0) {
6699 							continue;
6700 						}
6701 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6702 							if (local_scope == 0)
6703 								continue;
6704 							if (sin6->sin6_scope_id == 0) {
6705 								if (sa6_recoverscope(sin6) != 0)
6706 								/* bad link local address */
6715 									continue;
6716 							}
6717 						}
6718 						if ((site_scope == 0) &&
6719 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6720 							continue;
6721 						}
6722 						/* count this one */
6723 						count++;
6724 					}
6725 					break;
6726 #endif
6727 				default:
6728 					/* TSNH */
6729 					break;
6730 				}
6731 			}
6732 		}
6733 	} else {
6734 		/*
6735 		 * subset bound case
6736 		 */
6737 		struct sctp_laddr *laddr;
6738 
6739 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6740 		    sctp_nxt_addr) {
6741 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6742 				continue;
6743 			}
6744 			/* count this one */
6745 			count++;
6746 		}
6747 	}
6748 	SCTP_IPI_ADDR_RUNLOCK();
6749 	return (count);
6750 }
6751 
6752 #if defined(SCTP_LOCAL_TRACE_BUF)
6753 
6754 void
6755 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6756 {
6757 	uint32_t saveindex, newindex;
6758 
6759 	do {
6760 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6761 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6762 			newindex = 1;
6763 		} else {
6764 			newindex = saveindex + 1;
6765 		}
6766 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
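	/*
	 * The compare-and-set loop above lets concurrent callers claim
	 * distinct slots without a lock; when the index wraps, the writer
	 * that saw it at the limit uses entry 0 and restarts the index at 1.
	 */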
6767 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6768 		saveindex = 0;
6769 	}
6770 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6771 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6772 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6773 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6774 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6775 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6776 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6777 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6778 }
6779 
6780 #endif
6781 static void
6782 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6783     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6784 {
6785 	struct ip *iph;
6786 #ifdef INET6
6787 	struct ip6_hdr *ip6;
6788 #endif
6789 	struct mbuf *sp, *last;
6790 	struct udphdr *uhdr;
6791 	uint16_t port;
6792 
6793 	if ((m->m_flags & M_PKTHDR) == 0) {
6794 		/* Can't handle one that is not a pkt hdr */
6795 		goto out;
6796 	}
6797 	/* Pull the src port */
6798 	iph = mtod(m, struct ip *);
6799 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6800 	port = uhdr->uh_sport;
6801 	/*
6802 	 * Split out the mbuf chain. Leave the IP header in m, place the
6803 	 * rest in the sp.
6804 	 */
6805 	sp = m_split(m, off, M_NOWAIT);
6806 	if (sp == NULL) {
6807 		/* Gak, drop packet, we can't do a split */
6808 		goto out;
6809 	}
6810 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6811 		/* Gak, packet can't have an SCTP header in it - too small */
6812 		m_freem(sp);
6813 		goto out;
6814 	}
6815 	/* Now pull up the UDP header and SCTP header together */
6816 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6817 	if (sp == NULL) {
6818 		/* Gak pullup failed */
6819 		goto out;
6820 	}
6821 	/* Trim out the UDP header */
6822 	m_adj(sp, sizeof(struct udphdr));
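	/*
	 * At this point 'm' holds the outer IP header and 'sp' the SCTP
	 * packet with the UDP header stripped; chaining them back together
	 * reconstructs what a plain (non-tunneled) SCTP packet would look
	 * like.
	 */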
6823 
6824 	/* Now reconstruct the mbuf chain */
6825 	for (last = m; last->m_next; last = last->m_next);
6826 	last->m_next = sp;
6827 	m->m_pkthdr.len += sp->m_pkthdr.len;
6828 	/*
6829 	 * The CSUM_DATA_VALID flag indicates that the HW checked the UDP
6830 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
6831 	 * CSUM_SCTP_VALID this would imply that the HW also verified the
6832 	 * SCTP checksum. Therefore, clear the bit.
6833 	 */
6834 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6835 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6836 	    m->m_pkthdr.len,
6837 	    if_name(m->m_pkthdr.rcvif),
6838 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6839 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
6840 	iph = mtod(m, struct ip *);
6841 	switch (iph->ip_v) {
6842 #ifdef INET
6843 	case IPVERSION:
6844 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6845 		sctp_input_with_port(m, off, port);
6846 		break;
6847 #endif
6848 #ifdef INET6
6849 	case IPV6_VERSION >> 4:
6850 		ip6 = mtod(m, struct ip6_hdr *);
6851 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6852 		sctp6_input_with_port(&m, &off, port);
6853 		break;
6854 #endif
6855 	default:
6856 		goto out;
6857 		break;
6858 	}
6859 	return;
6860 out:
6861 	m_freem(m);
6862 }
6863 
6864 #ifdef INET
6865 static void
6866 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
6867 {
6868 	struct ip *outer_ip, *inner_ip;
6869 	struct sctphdr *sh;
6870 	struct icmp *icmp;
6871 	struct udphdr *udp;
6872 	struct sctp_inpcb *inp;
6873 	struct sctp_tcb *stcb;
6874 	struct sctp_nets *net;
6875 	struct sctp_init_chunk *ch;
6876 	struct sockaddr_in src, dst;
6877 	uint8_t type, code;
6878 
6879 	inner_ip = (struct ip *)vip;
6880 	icmp = (struct icmp *)((caddr_t)inner_ip -
6881 	    (sizeof(struct icmp) - sizeof(struct ip)));
6882 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
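	/*
	 * The ICMP error must quote enough of the original packet to let us
	 * see the inner IP header, the UDP header and the first 8 bytes of
	 * the SCTP common header (ports and verification tag); the length
	 * check below covers the outer IP header, the 8-byte ICMP header
	 * and that quoted data.
	 */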
6883 	if (ntohs(outer_ip->ip_len) <
6884 	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
6885 		return;
6886 	}
6887 	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
6888 	sh = (struct sctphdr *)(udp + 1);
6889 	memset(&src, 0, sizeof(struct sockaddr_in));
6890 	src.sin_family = AF_INET;
6891 	src.sin_len = sizeof(struct sockaddr_in);
6892 	src.sin_port = sh->src_port;
6893 	src.sin_addr = inner_ip->ip_src;
6894 	memset(&dst, 0, sizeof(struct sockaddr_in));
6895 	dst.sin_family = AF_INET;
6896 	dst.sin_len = sizeof(struct sockaddr_in);
6897 	dst.sin_port = sh->dest_port;
6898 	dst.sin_addr = inner_ip->ip_dst;
6899 	/*
6900 	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
6901 	 * holds our local endpoint address. Thus we reverse the dst and the
6902 	 * src in the lookup.
6903 	 */
6904 	inp = NULL;
6905 	net = NULL;
6906 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
6907 	    (struct sockaddr *)&src,
6908 	    &inp, &net, 1,
6909 	    SCTP_DEFAULT_VRFID);
6910 	if ((stcb != NULL) &&
6911 	    (net != NULL) &&
6912 	    (inp != NULL)) {
6913 		/* Check the UDP port numbers */
6914 		if ((udp->uh_dport != net->port) ||
6915 		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
6916 			SCTP_TCB_UNLOCK(stcb);
6917 			return;
6918 		}
6919 		/* Check the verification tag */
6920 		if (ntohl(sh->v_tag) != 0) {
6921 			/*
6922 			 * This must be the verification tag used for
6923 			 * sending out packets. We don't consider packets
6924 			 * reflecting the verification tag.
6925 			 */
6926 			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
6927 				SCTP_TCB_UNLOCK(stcb);
6928 				return;
6929 			}
6930 		} else {
6931 			if (ntohs(outer_ip->ip_len) >=
6932 			    sizeof(struct ip) +
6933 			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
6934 				/*
6935 				 * In this case we can check if we got an
6936 				 * INIT chunk and if the initiate tag
6937 				 * matches.
6938 				 */
6939 				ch = (struct sctp_init_chunk *)(sh + 1);
6940 				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
6941 				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
6942 					SCTP_TCB_UNLOCK(stcb);
6943 					return;
6944 				}
6945 			} else {
6946 				SCTP_TCB_UNLOCK(stcb);
6947 				return;
6948 			}
6949 		}
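		/*
		 * Report the error. A port unreachable for the UDP
		 * encapsulation port is reported as a protocol unreachable,
		 * since it presumably means the peer does not accept SCTP
		 * over UDP at all.
		 */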
6950 		type = icmp->icmp_type;
6951 		code = icmp->icmp_code;
6952 		if ((type == ICMP_UNREACH) &&
6953 		    (code == ICMP_UNREACH_PORT)) {
6954 			code = ICMP_UNREACH_PROTOCOL;
6955 		}
6956 		sctp_notify(inp, stcb, net, type, code,
6957 		    ntohs(inner_ip->ip_len),
6958 		    ntohs(icmp->icmp_nextmtu));
6959 	} else {
6960 		if ((stcb == NULL) && (inp != NULL)) {
6961 			/* reduce ref-count */
6962 			SCTP_INP_WLOCK(inp);
6963 			SCTP_INP_DECR_REF(inp);
6964 			SCTP_INP_WUNLOCK(inp);
6965 		}
6966 		if (stcb) {
6967 			SCTP_TCB_UNLOCK(stcb);
6968 		}
6969 	}
6970 	return;
6971 }
6972 #endif
6973 
6974 #ifdef INET6
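/*
 * IPv6 counterpart of sctp_recv_icmp_tunneled_packet(): handle an ICMPv6
 * error reported for an SCTP packet sent encapsulated in UDP. The needed
 * headers are copied out of the mbuf chain with m_copydata() before the
 * association lookup, the UDP port and verification tag checks, and the
 * final call to sctp6_notify().
 */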
6975 static void
6976 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
6977 {
6978 	struct ip6ctlparam *ip6cp;
6979 	struct sctp_inpcb *inp;
6980 	struct sctp_tcb *stcb;
6981 	struct sctp_nets *net;
6982 	struct sctphdr sh;
6983 	struct udphdr udp;
6984 	struct sockaddr_in6 src, dst;
6985 	uint8_t type, code;
6986 
6987 	ip6cp = (struct ip6ctlparam *)d;
6988 	/*
6989 	 * XXX: We assume that when ip6c_ip6 is non-NULL, ip6c_m and ip6c_off are valid.
6990 	 */
6991 	if (ip6cp->ip6c_m == NULL) {
6992 		return;
6993 	}
6994 	/*
6995 	 * Check if we can safely examine the ports and the verification tag
6996 	 * of the SCTP common header.
6997 	 */
6998 	if (ip6cp->ip6c_m->m_pkthdr.len <
6999 	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
7000 		return;
7001 	}
7002 	/* Copy out the UDP header. */
7003 	memset(&udp, 0, sizeof(struct udphdr));
7004 	m_copydata(ip6cp->ip6c_m,
7005 	    ip6cp->ip6c_off,
7006 	    sizeof(struct udphdr),
7007 	    (caddr_t)&udp);
7008 	/* Copy out the port numbers and the verification tag. */
7009 	memset(&sh, 0, sizeof(struct sctphdr));
7010 	m_copydata(ip6cp->ip6c_m,
7011 	    ip6cp->ip6c_off + sizeof(struct udphdr),
7012 	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
7013 	    (caddr_t)&sh);
7014 	memset(&src, 0, sizeof(struct sockaddr_in6));
7015 	src.sin6_family = AF_INET6;
7016 	src.sin6_len = sizeof(struct sockaddr_in6);
7017 	src.sin6_port = sh.src_port;
7018 	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
7019 	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7020 		return;
7021 	}
7022 	memset(&dst, 0, sizeof(struct sockaddr_in6));
7023 	dst.sin6_family = AF_INET6;
7024 	dst.sin6_len = sizeof(struct sockaddr_in6);
7025 	dst.sin6_port = sh.dest_port;
7026 	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7027 	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7028 		return;
7029 	}
7030 	inp = NULL;
7031 	net = NULL;
7032 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7033 	    (struct sockaddr *)&src,
7034 	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7035 	if ((stcb != NULL) &&
7036 	    (net != NULL) &&
7037 	    (inp != NULL)) {
7038 		/* Check the UDP port numbers */
7039 		if ((udp.uh_dport != net->port) ||
7040 		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7041 			SCTP_TCB_UNLOCK(stcb);
7042 			return;
7043 		}
7044 		/* Check the verification tag */
7045 		if (ntohl(sh.v_tag) != 0) {
7046 			/*
7047 			 * This must be the verification tag used for
7048 			 * sending out packets. We don't consider packets
7049 			 * reflecting the verification tag.
7050 			 */
7051 			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7052 				SCTP_TCB_UNLOCK(stcb);
7053 				return;
7054 			}
7055 		} else {
7056 			if (ip6cp->ip6c_m->m_pkthdr.len >=
7057 			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7058 			    sizeof(struct sctphdr) +
7059 			    sizeof(struct sctp_chunkhdr) +
7060 			    offsetof(struct sctp_init, a_rwnd)) {
7061 				/*
7062 				 * In this case we can check if we got an
7063 				 * INIT chunk and if the initiate tag
7064 				 * matches.
7065 				 */
7066 				uint32_t initiate_tag;
7067 				uint8_t chunk_type;
7068 
7069 				m_copydata(ip6cp->ip6c_m,
7070 				    ip6cp->ip6c_off +
7071 				    sizeof(struct udphdr) +
7072 				    sizeof(struct sctphdr),
7073 				    sizeof(uint8_t),
7074 				    (caddr_t)&chunk_type);
7075 				m_copydata(ip6cp->ip6c_m,
7076 				    ip6cp->ip6c_off +
7077 				    sizeof(struct udphdr) +
7078 				    sizeof(struct sctphdr) +
7079 				    sizeof(struct sctp_chunkhdr),
7080 				    sizeof(uint32_t),
7081 				    (caddr_t)&initiate_tag);
7082 				if ((chunk_type != SCTP_INITIATION) ||
7083 				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7084 					SCTP_TCB_UNLOCK(stcb);
7085 					return;
7086 				}
7087 			} else {
7088 				SCTP_TCB_UNLOCK(stcb);
7089 				return;
7090 			}
7091 		}
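		/*
		 * Report the error. A destination unreachable/no port for
		 * the UDP encapsulation port is reported as a parameter
		 * problem/unrecognized next header, the ICMPv6 analogue of
		 * the protocol unreachable mapping used for IPv4.
		 */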
7092 		type = ip6cp->ip6c_icmp6->icmp6_type;
7093 		code = ip6cp->ip6c_icmp6->icmp6_code;
7094 		if ((type == ICMP6_DST_UNREACH) &&
7095 		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7096 			type = ICMP6_PARAM_PROB;
7097 			code = ICMP6_PARAMPROB_NEXTHEADER;
7098 		}
7099 		sctp6_notify(inp, stcb, net, type, code,
7100 		    (uint16_t) ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7101 	} else {
7102 		if ((stcb == NULL) && (inp != NULL)) {
7103 			/* reduce inp's ref-count */
7104 			SCTP_INP_WLOCK(inp);
7105 			SCTP_INP_DECR_REF(inp);
7106 			SCTP_INP_WUNLOCK(inp);
7107 		}
7108 		if (stcb) {
7109 			SCTP_TCB_UNLOCK(stcb);
7110 		}
7111 	}
7112 }
7113 #endif
7114 
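/*
 * Tear down SCTP over UDP tunneling by closing the IPv4 and/or IPv6
 * tunneling sockets, if they exist.
 */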
7115 void
7116 sctp_over_udp_stop(void)
7117 {
7118 	/*
7119 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
7120 	 * for writing!
7121 	 */
7122 #ifdef INET
7123 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7124 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7125 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7126 	}
7127 #endif
7128 #ifdef INET6
7129 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7130 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7131 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7132 	}
7133 #endif
7134 }
7135 
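/*
 * Set up SCTP over UDP tunneling: create an IPv4 and/or IPv6 UDP socket,
 * install sctp_recv_udp_tunneled_packet() and the matching ICMP handler as
 * tunneling callbacks, and bind the socket to the configured tunneling
 * port. Typically invoked from the handler of the
 * net.inet.sctp.udp_tunneling_port sysctl when a non-zero port is set.
 */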
7136 int
7137 sctp_over_udp_start(void)
7138 {
7139 	uint16_t port;
7140 	int ret;
7141 #ifdef INET
7142 	struct sockaddr_in sin;
7143 #endif
7144 #ifdef INET6
7145 	struct sockaddr_in6 sin6;
7146 #endif
7147 	/*
7148 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
7149 	 * for writing!
7150 	 */
7151 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7152 	if (ntohs(port) == 0) {
7153 		/* Must have a port set */
7154 		return (EINVAL);
7155 	}
7156 #ifdef INET
7157 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7158 		/* Already running -- must stop first */
7159 		return (EALREADY);
7160 	}
7161 #endif
7162 #ifdef INET6
7163 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7164 		/* Already running -- must stop first */
7165 		return (EALREADY);
7166 	}
7167 #endif
7168 #ifdef INET
7169 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7170 	    SOCK_DGRAM, IPPROTO_UDP,
7171 	    curthread->td_ucred, curthread))) {
7172 		sctp_over_udp_stop();
7173 		return (ret);
7174 	}
7175 	/* Call the special UDP hook to install the tunneling callbacks. */
7176 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7177 	    sctp_recv_udp_tunneled_packet,
7178 	    sctp_recv_icmp_tunneled_packet,
7179 	    NULL))) {
7180 		sctp_over_udp_stop();
7181 		return (ret);
7182 	}
7183 	/* Ok, we have a socket, bind it to the port. */
7184 	memset(&sin, 0, sizeof(struct sockaddr_in));
7185 	sin.sin_len = sizeof(struct sockaddr_in);
7186 	sin.sin_family = AF_INET;
7187 	sin.sin_port = htons(port);
7188 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7189 	    (struct sockaddr *)&sin, curthread))) {
7190 		sctp_over_udp_stop();
7191 		return (ret);
7192 	}
7193 #endif
7194 #ifdef INET6
7195 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7196 	    SOCK_DGRAM, IPPROTO_UDP,
7197 	    curthread->td_ucred, curthread))) {
7198 		sctp_over_udp_stop();
7199 		return (ret);
7200 	}
7201 	/* Call the special UDP hook to install the tunneling callbacks. */
7202 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7203 	    sctp_recv_udp_tunneled_packet,
7204 	    sctp_recv_icmp6_tunneled_packet,
7205 	    NULL))) {
7206 		sctp_over_udp_stop();
7207 		return (ret);
7208 	}
7209 	/* Ok, we have a socket, bind it to the port. */
7210 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7211 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7212 	sin6.sin6_family = AF_INET6;
7213 	sin6.sin6_port = htons(port);
7214 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7215 	    (struct sockaddr *)&sin6, curthread))) {
7216 		sctp_over_udp_stop();
7217 		return (ret);
7218 	}
7219 #endif
7220 	return (0);
7221 }
7222