xref: /freebsd/sys/netinet/sctputil.c (revision 545ddfbe7d4fe8adfb862903b24eac1d5896c1ef)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55 
56 
57 #ifndef KTR_SCTP
58 #define KTR_SCTP KTR_SUBSYS
59 #endif
60 
61 extern struct sctp_cc_functions sctp_cc_functions[];
62 extern struct sctp_ss_functions sctp_ss_functions[];
63 
64 void
65 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
66 {
67 	struct sctp_cwnd_log sctp_clog;
68 
69 	sctp_clog.x.sb.stcb = stcb;
70 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
71 	if (stcb)
72 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
73 	else
74 		sctp_clog.x.sb.stcb_sbcc = 0;
75 	sctp_clog.x.sb.incr = incr;
76 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
77 	    SCTP_LOG_EVENT_SB,
78 	    from,
79 	    sctp_clog.x.misc.log1,
80 	    sctp_clog.x.misc.log2,
81 	    sctp_clog.x.misc.log3,
82 	    sctp_clog.x.misc.log4);
83 }
84 
85 void
86 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
87 {
88 	struct sctp_cwnd_log sctp_clog;
89 
90 	sctp_clog.x.close.inp = (void *)inp;
91 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
92 	if (stcb) {
93 		sctp_clog.x.close.stcb = (void *)stcb;
94 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
95 	} else {
96 		sctp_clog.x.close.stcb = 0;
97 		sctp_clog.x.close.state = 0;
98 	}
99 	sctp_clog.x.close.loc = loc;
100 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
101 	    SCTP_LOG_EVENT_CLOSE,
102 	    0,
103 	    sctp_clog.x.misc.log1,
104 	    sctp_clog.x.misc.log2,
105 	    sctp_clog.x.misc.log3,
106 	    sctp_clog.x.misc.log4);
107 }
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 }
125 
126 void
127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128 {
129 	struct sctp_cwnd_log sctp_clog;
130 
131 	sctp_clog.x.strlog.stcb = stcb;
132 	sctp_clog.x.strlog.n_tsn = tsn;
133 	sctp_clog.x.strlog.n_sseq = sseq;
134 	sctp_clog.x.strlog.e_tsn = 0;
135 	sctp_clog.x.strlog.e_sseq = 0;
136 	sctp_clog.x.strlog.strm = stream;
137 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138 	    SCTP_LOG_EVENT_STRM,
139 	    from,
140 	    sctp_clog.x.misc.log1,
141 	    sctp_clog.x.misc.log2,
142 	    sctp_clog.x.misc.log3,
143 	    sctp_clog.x.misc.log4);
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 void
166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167 {
168 	struct sctp_cwnd_log sctp_clog;
169 
170 	sctp_clog.x.sack.cumack = cumack;
171 	sctp_clog.x.sack.oldcumack = old_cumack;
172 	sctp_clog.x.sack.tsn = tsn;
173 	sctp_clog.x.sack.numGaps = gaps;
174 	sctp_clog.x.sack.numDups = dups;
175 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176 	    SCTP_LOG_EVENT_SACK,
177 	    from,
178 	    sctp_clog.x.misc.log1,
179 	    sctp_clog.x.misc.log2,
180 	    sctp_clog.x.misc.log3,
181 	    sctp_clog.x.misc.log4);
182 }
183 
184 void
185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186 {
187 	struct sctp_cwnd_log sctp_clog;
188 
189 	memset(&sctp_clog, 0, sizeof(sctp_clog));
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
204 {
205 	struct sctp_cwnd_log sctp_clog;
206 
207 	memset(&sctp_clog, 0, sizeof(sctp_clog));
208 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210 	sctp_clog.x.fr.tsn = tsn;
211 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212 	    SCTP_LOG_EVENT_FR,
213 	    from,
214 	    sctp_clog.x.misc.log1,
215 	    sctp_clog.x.misc.log2,
216 	    sctp_clog.x.misc.log3,
217 	    sctp_clog.x.misc.log4);
218 }
219 
220 #ifdef SCTP_MBUF_LOGGING
221 void
222 sctp_log_mb(struct mbuf *m, int from)
223 {
224 	struct sctp_cwnd_log sctp_clog;
225 
226 	sctp_clog.x.mb.mp = m;
227 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
228 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
229 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
230 	if (SCTP_BUF_IS_EXTENDED(m)) {
231 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
232 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
233 	} else {
234 		sctp_clog.x.mb.ext = 0;
235 		sctp_clog.x.mb.refcnt = 0;
236 	}
237 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
238 	    SCTP_LOG_EVENT_MBUF,
239 	    from,
240 	    sctp_clog.x.misc.log1,
241 	    sctp_clog.x.misc.log2,
242 	    sctp_clog.x.misc.log3,
243 	    sctp_clog.x.misc.log4);
244 }
245 
246 void
247 sctp_log_mbc(struct mbuf *m, int from)
248 {
249 	struct mbuf *mat;
250 
251 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
252 		sctp_log_mb(mat, from);
253 	}
254 }
255 
256 #endif
257 
258 void
259 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
260 {
261 	struct sctp_cwnd_log sctp_clog;
262 
263 	if (control == NULL) {
264 		SCTP_PRINTF("Gak log of NULL?\n");
265 		return;
266 	}
267 	sctp_clog.x.strlog.stcb = control->stcb;
268 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
269 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
270 	sctp_clog.x.strlog.strm = control->sinfo_stream;
271 	if (poschk != NULL) {
272 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
273 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
274 	} else {
275 		sctp_clog.x.strlog.e_tsn = 0;
276 		sctp_clog.x.strlog.e_sseq = 0;
277 	}
278 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
279 	    SCTP_LOG_EVENT_STRM,
280 	    from,
281 	    sctp_clog.x.misc.log1,
282 	    sctp_clog.x.misc.log2,
283 	    sctp_clog.x.misc.log3,
284 	    sctp_clog.x.misc.log4);
285 }
286 
287 void
288 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
289 {
290 	struct sctp_cwnd_log sctp_clog;
291 
292 	sctp_clog.x.cwnd.net = net;
293 	if (stcb->asoc.send_queue_cnt > 255)
294 		sctp_clog.x.cwnd.cnt_in_send = 255;
295 	else
296 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
297 	if (stcb->asoc.stream_queue_cnt > 255)
298 		sctp_clog.x.cwnd.cnt_in_str = 255;
299 	else
300 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
301 
302 	if (net) {
303 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
304 		sctp_clog.x.cwnd.inflight = net->flight_size;
305 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
306 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
307 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
308 	}
309 	if (SCTP_CWNDLOG_PRESEND == from) {
310 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
311 	}
312 	sctp_clog.x.cwnd.cwnd_augment = augment;
313 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
314 	    SCTP_LOG_EVENT_CWND,
315 	    from,
316 	    sctp_clog.x.misc.log1,
317 	    sctp_clog.x.misc.log2,
318 	    sctp_clog.x.misc.log3,
319 	    sctp_clog.x.misc.log4);
320 }
321 
322 void
323 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
324 {
325 	struct sctp_cwnd_log sctp_clog;
326 
327 	memset(&sctp_clog, 0, sizeof(sctp_clog));
328 	if (inp) {
329 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
330 
331 	} else {
332 		sctp_clog.x.lock.sock = (void *)NULL;
333 	}
334 	sctp_clog.x.lock.inp = (void *)inp;
335 	if (stcb) {
336 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
337 	} else {
338 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
339 	}
340 	if (inp) {
341 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
342 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
343 	} else {
344 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
345 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
346 	}
347 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
348 	if (inp && (inp->sctp_socket)) {
349 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
350 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
351 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
352 	} else {
353 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
354 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
355 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
356 	}
357 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
358 	    SCTP_LOG_LOCK_EVENT,
359 	    from,
360 	    sctp_clog.x.misc.log1,
361 	    sctp_clog.x.misc.log2,
362 	    sctp_clog.x.misc.log3,
363 	    sctp_clog.x.misc.log4);
364 }
365 
366 void
367 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
368 {
369 	struct sctp_cwnd_log sctp_clog;
370 
371 	memset(&sctp_clog, 0, sizeof(sctp_clog));
372 	sctp_clog.x.cwnd.net = net;
373 	sctp_clog.x.cwnd.cwnd_new_value = error;
374 	sctp_clog.x.cwnd.inflight = net->flight_size;
375 	sctp_clog.x.cwnd.cwnd_augment = burst;
376 	if (stcb->asoc.send_queue_cnt > 255)
377 		sctp_clog.x.cwnd.cnt_in_send = 255;
378 	else
379 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
380 	if (stcb->asoc.stream_queue_cnt > 255)
381 		sctp_clog.x.cwnd.cnt_in_str = 255;
382 	else
383 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
384 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
385 	    SCTP_LOG_EVENT_MAXBURST,
386 	    from,
387 	    sctp_clog.x.misc.log1,
388 	    sctp_clog.x.misc.log2,
389 	    sctp_clog.x.misc.log3,
390 	    sctp_clog.x.misc.log4);
391 }
392 
393 void
394 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
395 {
396 	struct sctp_cwnd_log sctp_clog;
397 
398 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
399 	sctp_clog.x.rwnd.send_size = snd_size;
400 	sctp_clog.x.rwnd.overhead = overhead;
401 	sctp_clog.x.rwnd.new_rwnd = 0;
402 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
403 	    SCTP_LOG_EVENT_RWND,
404 	    from,
405 	    sctp_clog.x.misc.log1,
406 	    sctp_clog.x.misc.log2,
407 	    sctp_clog.x.misc.log3,
408 	    sctp_clog.x.misc.log4);
409 }
410 
411 void
412 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
413 {
414 	struct sctp_cwnd_log sctp_clog;
415 
416 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
417 	sctp_clog.x.rwnd.send_size = flight_size;
418 	sctp_clog.x.rwnd.overhead = overhead;
419 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
420 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
421 	    SCTP_LOG_EVENT_RWND,
422 	    from,
423 	    sctp_clog.x.misc.log1,
424 	    sctp_clog.x.misc.log2,
425 	    sctp_clog.x.misc.log3,
426 	    sctp_clog.x.misc.log4);
427 }
428 
429 #ifdef SCTP_MBCNT_LOGGING
430 static void
431 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
432 {
433 	struct sctp_cwnd_log sctp_clog;
434 
435 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
436 	sctp_clog.x.mbcnt.size_change = book;
437 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
438 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
439 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
440 	    SCTP_LOG_EVENT_MBCNT,
441 	    from,
442 	    sctp_clog.x.misc.log1,
443 	    sctp_clog.x.misc.log2,
444 	    sctp_clog.x.misc.log3,
445 	    sctp_clog.x.misc.log4);
446 }
447 
448 #endif
449 
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	/* Trace four caller-chosen values as a generic "misc" KTR event. */
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
458 
459 void
460 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
461 {
462 	struct sctp_cwnd_log sctp_clog;
463 
464 	sctp_clog.x.wake.stcb = (void *)stcb;
465 	sctp_clog.x.wake.wake_cnt = wake_cnt;
466 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
467 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
468 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
469 
470 	if (stcb->asoc.stream_queue_cnt < 0xff)
471 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
472 	else
473 		sctp_clog.x.wake.stream_qcnt = 0xff;
474 
475 	if (stcb->asoc.chunks_on_out_queue < 0xff)
476 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
477 	else
478 		sctp_clog.x.wake.chunks_on_oque = 0xff;
479 
480 	sctp_clog.x.wake.sctpflags = 0;
481 	/* set in the defered mode stuff */
482 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
483 		sctp_clog.x.wake.sctpflags |= 1;
484 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
485 		sctp_clog.x.wake.sctpflags |= 2;
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
487 		sctp_clog.x.wake.sctpflags |= 4;
488 	/* what about the sb */
489 	if (stcb->sctp_socket) {
490 		struct socket *so = stcb->sctp_socket;
491 
492 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
493 	} else {
494 		sctp_clog.x.wake.sbflags = 0xff;
495 	}
496 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
497 	    SCTP_LOG_EVENT_WAKE,
498 	    from,
499 	    sctp_clog.x.misc.log1,
500 	    sctp_clog.x.misc.log2,
501 	    sctp_clog.x.misc.log3,
502 	    sctp_clog.x.misc.log4);
503 }
504 
505 void
506 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
507 {
508 	struct sctp_cwnd_log sctp_clog;
509 
510 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
511 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
512 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
513 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
514 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
515 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
516 	sctp_clog.x.blk.sndlen = sendlen;
517 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
518 	    SCTP_LOG_EVENT_BLOCK,
519 	    from,
520 	    sctp_clog.x.misc.log1,
521 	    sctp_clog.x.misc.log2,
522 	    sctp_clog.x.misc.log3,
523 	    sctp_clog.x.misc.log4);
524 }
525 
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/*
	 * Stub: stat-log retrieval is not implemented here; the KTR trace
	 * buffer is presumably read with external tooling instead.
	 */
	/* May need to fix this if ktrdump does not work */
	return (0);
}
532 
533 #ifdef SCTP_AUDITING_ENABLED
534 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
535 static int sctp_audit_indx = 0;
536 
537 static
538 void
539 sctp_print_audit_report(void)
540 {
541 	int i;
542 	int cnt;
543 
544 	cnt = 0;
545 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
546 		if ((sctp_audit_data[i][0] == 0xe0) &&
547 		    (sctp_audit_data[i][1] == 0x01)) {
548 			cnt = 0;
549 			SCTP_PRINTF("\n");
550 		} else if (sctp_audit_data[i][0] == 0xf0) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
554 		    (sctp_audit_data[i][1] == 0x01)) {
555 			SCTP_PRINTF("\n");
556 			cnt = 0;
557 		}
558 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
559 		    (uint32_t) sctp_audit_data[i][1]);
560 		cnt++;
561 		if ((cnt % 14) == 0)
562 			SCTP_PRINTF("\n");
563 	}
564 	for (i = 0; i < sctp_audit_indx; i++) {
565 		if ((sctp_audit_data[i][0] == 0xe0) &&
566 		    (sctp_audit_data[i][1] == 0x01)) {
567 			cnt = 0;
568 			SCTP_PRINTF("\n");
569 		} else if (sctp_audit_data[i][0] == 0xf0) {
570 			cnt = 0;
571 			SCTP_PRINTF("\n");
572 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
573 		    (sctp_audit_data[i][1] == 0x01)) {
574 			SCTP_PRINTF("\n");
575 			cnt = 0;
576 		}
577 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
578 		    (uint32_t) sctp_audit_data[i][1]);
579 		cnt++;
580 		if ((cnt % 14) == 0)
581 			SCTP_PRINTF("\n");
582 	}
583 	SCTP_PRINTF("\n");
584 }
585 
/*
 * Consistency audit of an association's retransmission and flight-size
 * accounting.  Recomputes the counters from the sent queue and the net
 * list, logs a tagged entry in the audit ring buffer for every mismatch,
 * and *corrects* the stored counters in place.  If any mismatch was
 * found, the full audit report is printed.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA: audit entry point, low byte records the caller's location. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: no endpoint supplied, nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: no association supplied, nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the retran count as seen before the recount. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Recount from the sent queue: chunks marked for resend, and the
	 * booked bytes/count of chunks still considered in flight.
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retran count mismatch; fix it up. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		/* 0xA2: record the corrected retran count. */
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total flight bytes mismatch; fix it up. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: flight chunk count mismatch; fix it up. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: per-net flight sizes must sum to the total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net flight sums disagree with the total. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
715 
716 void
717 sctp_audit_log(uint8_t ev, uint8_t fd)
718 {
719 
720 	sctp_audit_data[sctp_audit_indx][0] = ev;
721 	sctp_audit_data[sctp_audit_indx][1] = fd;
722 	sctp_audit_indx++;
723 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
724 		sctp_audit_indx = 0;
725 	}
726 }
727 
728 #endif
729 
730 /*
731  * sctp_stop_timers_for_shutdown() should be called
732  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
733  * state to make sure that all timers are stopped.
734  */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	/* Association-wide timers first. */
	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	/* Then the per-destination PMTU discovery and heartbeat timers. */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
	}
}
753 
754 /*
755  * a list of sizes based on typical mtu's, used only if next hop size not
756  * returned.
757  */
static uint32_t sctp_mtu_sizes[] = {
	68,			/* minimum IPv4 MTU (RFC 791) */
	296,			/* presumably low-speed SLIP/PPP links */
	508,
	512,
	544,
	576,			/* minimum IPv4 reassembly buffer size */
	1006,
	1492,			/* Ethernet with LLC/SNAP, or PPPoE */
	1500,			/* plain Ethernet */
	1536,
	2002,
	2048,
	4352,			/* FDDI */
	4464,
	8166,
	17914,			/* 16 Mbit/s token ring */
	32000,
	65535			/* maximum IPv4 datagram size */
};
778 
779 /*
780  * Return the largest MTU smaller than val. If there is no
781  * entry, just return val.
782  */
783 uint32_t
784 sctp_get_prev_mtu(uint32_t val)
785 {
786 	uint32_t i;
787 
788 	if (val <= sctp_mtu_sizes[0]) {
789 		return (val);
790 	}
791 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
792 		if (val <= sctp_mtu_sizes[i]) {
793 			break;
794 		}
795 	}
796 	return (sctp_mtu_sizes[i - 1]);
797 }
798 
799 /*
800  * Return the smallest MTU larger than val. If there is no
801  * entry, just return val.
802  */
803 uint32_t
804 sctp_get_next_mtu(uint32_t val)
805 {
806 	/* select another MTU that is just bigger than this one */
807 	uint32_t i;
808 
809 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
810 		if (val < sctp_mtu_sizes[i]) {
811 			return (sctp_mtu_sizes[i]);
812 		}
813 	}
814 	return (val);
815 }
816 
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	/* Restart consumption at the front of the refreshed store. */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	/* Bump the counter so the next refill hashes different input. */
	m->random_counter++;
}
835 
836 uint32_t
837 sctp_select_initial_TSN(struct sctp_pcb *inp)
838 {
839 	/*
840 	 * A true implementation should use random selection process to get
841 	 * the initial stream sequence number, using RFC1750 as a good
842 	 * guideline
843 	 */
844 	uint32_t x, *xp;
845 	uint8_t *p;
846 	int store_at, new_store;
847 
848 	if (inp->initial_sequence_debug != 0) {
849 		uint32_t ret;
850 
851 		ret = inp->initial_sequence_debug;
852 		inp->initial_sequence_debug++;
853 		return (ret);
854 	}
855 retry:
856 	store_at = inp->store_at;
857 	new_store = store_at + sizeof(uint32_t);
858 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
859 		new_store = 0;
860 	}
861 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
862 		goto retry;
863 	}
864 	if (new_store == 0) {
865 		/* Refill the random store */
866 		sctp_fill_random_store(inp);
867 	}
868 	p = &inp->random_store[store_at];
869 	xp = (uint32_t *) p;
870 	x = *xp;
871 	return (x);
872 }
873 
874 uint32_t
875 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
876 {
877 	uint32_t x;
878 	struct timeval now;
879 
880 	if (check) {
881 		(void)SCTP_GETTIME_TIMEVAL(&now);
882 	}
883 	for (;;) {
884 		x = sctp_select_initial_TSN(&inp->sctp_ep);
885 		if (x == 0) {
886 			/* we never use 0 */
887 			continue;
888 		}
889 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
890 			break;
891 		}
892 	}
893 	return (x);
894 }
895 
896 int
897 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
898     uint32_t override_tag, uint32_t vrf_id)
899 {
900 	struct sctp_association *asoc;
901 
902 	/*
903 	 * Anything set to zero is taken care of by the allocation routine's
904 	 * bzero
905 	 */
906 
907 	/*
908 	 * Up front select what scoping to apply on addresses I tell my peer
909 	 * Not sure what to do with these right now, we will need to come up
910 	 * with a way to set them. We may need to pass them through from the
911 	 * caller in the sctp_aloc_assoc() function.
912 	 */
913 	int i;
914 
915 #if defined(SCTP_DETAILED_STR_STATS)
916 	int j;
917 
918 #endif
919 
920 	asoc = &stcb->asoc;
921 	/* init all variables to a known value. */
922 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
923 	asoc->max_burst = inp->sctp_ep.max_burst;
924 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
925 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
926 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
927 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
928 	asoc->ecn_supported = inp->ecn_supported;
929 	asoc->prsctp_supported = inp->prsctp_supported;
930 	asoc->auth_supported = inp->auth_supported;
931 	asoc->asconf_supported = inp->asconf_supported;
932 	asoc->reconfig_supported = inp->reconfig_supported;
933 	asoc->nrsack_supported = inp->nrsack_supported;
934 	asoc->pktdrop_supported = inp->pktdrop_supported;
935 	asoc->sctp_cmt_pf = (uint8_t) 0;
936 	asoc->sctp_frag_point = inp->sctp_frag_point;
937 	asoc->sctp_features = inp->sctp_features;
938 	asoc->default_dscp = inp->sctp_ep.default_dscp;
939 #ifdef INET6
940 	if (inp->sctp_ep.default_flowlabel) {
941 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
942 	} else {
943 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
944 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
945 			asoc->default_flowlabel &= 0x000fffff;
946 			asoc->default_flowlabel |= 0x80000000;
947 		} else {
948 			asoc->default_flowlabel = 0;
949 		}
950 	}
951 #endif
952 	asoc->sb_send_resv = 0;
953 	if (override_tag) {
954 		asoc->my_vtag = override_tag;
955 	} else {
956 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
957 	}
958 	/* Get the nonce tags */
959 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
960 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
961 	asoc->vrf_id = vrf_id;
962 
963 #ifdef SCTP_ASOCLOG_OF_TSNS
964 	asoc->tsn_in_at = 0;
965 	asoc->tsn_out_at = 0;
966 	asoc->tsn_in_wrapped = 0;
967 	asoc->tsn_out_wrapped = 0;
968 	asoc->cumack_log_at = 0;
969 	asoc->cumack_log_atsnt = 0;
970 #endif
971 #ifdef SCTP_FS_SPEC_LOG
972 	asoc->fs_index = 0;
973 #endif
974 	asoc->refcnt = 0;
975 	asoc->assoc_up_sent = 0;
976 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
977 	    sctp_select_initial_TSN(&inp->sctp_ep);
978 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
979 	/* we are optimisitic here */
980 	asoc->peer_supports_nat = 0;
981 	asoc->sent_queue_retran_cnt = 0;
982 
983 	/* for CMT */
984 	asoc->last_net_cmt_send_started = NULL;
985 
986 	/* This will need to be adjusted */
987 	asoc->last_acked_seq = asoc->init_seq_number - 1;
988 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
989 	asoc->asconf_seq_in = asoc->last_acked_seq;
990 
991 	/* here we are different, we hold the next one we expect */
992 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
993 
994 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
995 	asoc->initial_rto = inp->sctp_ep.initial_rto;
996 
997 	asoc->max_init_times = inp->sctp_ep.max_init_times;
998 	asoc->max_send_times = inp->sctp_ep.max_send_times;
999 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1000 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1001 	asoc->free_chunk_cnt = 0;
1002 
1003 	asoc->iam_blocking = 0;
1004 	asoc->context = inp->sctp_context;
1005 	asoc->local_strreset_support = inp->local_strreset_support;
1006 	asoc->def_send = inp->def_send;
1007 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1008 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1009 	asoc->pr_sctp_cnt = 0;
1010 	asoc->total_output_queue_size = 0;
1011 
1012 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1013 		asoc->scope.ipv6_addr_legal = 1;
1014 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1015 			asoc->scope.ipv4_addr_legal = 1;
1016 		} else {
1017 			asoc->scope.ipv4_addr_legal = 0;
1018 		}
1019 	} else {
1020 		asoc->scope.ipv6_addr_legal = 0;
1021 		asoc->scope.ipv4_addr_legal = 1;
1022 	}
1023 
1024 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1025 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1026 
1027 	asoc->smallest_mtu = inp->sctp_frag_point;
1028 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1029 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1030 
1031 	asoc->locked_on_sending = NULL;
1032 	asoc->stream_locked_on = 0;
1033 	asoc->ecn_echo_cnt_onq = 0;
1034 	asoc->stream_locked = 0;
1035 
1036 	asoc->send_sack = 1;
1037 
1038 	LIST_INIT(&asoc->sctp_restricted_addrs);
1039 
1040 	TAILQ_INIT(&asoc->nets);
1041 	TAILQ_INIT(&asoc->pending_reply_queue);
1042 	TAILQ_INIT(&asoc->asconf_ack_sent);
1043 	/* Setup to fill the hb random cache at first HB */
1044 	asoc->hb_random_idx = 4;
1045 
1046 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1047 
1048 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1049 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1050 
1051 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1052 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1053 
1054 	/*
1055 	 * Now the stream parameters, here we allocate space for all streams
1056 	 * that we request by default.
1057 	 */
1058 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1059 	    inp->sctp_ep.pre_open_stream_count;
1060 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1061 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1062 	    SCTP_M_STRMO);
1063 	if (asoc->strmout == NULL) {
1064 		/* big trouble no memory */
1065 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1066 		return (ENOMEM);
1067 	}
1068 	for (i = 0; i < asoc->streamoutcnt; i++) {
1069 		/*
1070 		 * inbound side must be set to 0xffff, also NOTE when we get
1071 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1072 		 * count (streamoutcnt) but first check if we sent to any of
1073 		 * the upper streams that were dropped (if some were). Those
1074 		 * that were dropped must be notified to the upper layer as
1075 		 * failed to send.
1076 		 */
1077 		asoc->strmout[i].next_sequence_send = 0x0;
1078 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1079 		asoc->strmout[i].chunks_on_queues = 0;
1080 #if defined(SCTP_DETAILED_STR_STATS)
1081 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1082 			asoc->strmout[i].abandoned_sent[j] = 0;
1083 			asoc->strmout[i].abandoned_unsent[j] = 0;
1084 		}
1085 #else
1086 		asoc->strmout[i].abandoned_sent[0] = 0;
1087 		asoc->strmout[i].abandoned_unsent[0] = 0;
1088 #endif
1089 		asoc->strmout[i].stream_no = i;
1090 		asoc->strmout[i].last_msg_incomplete = 0;
1091 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1092 	}
1093 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1094 
1095 	/* Now the mapping array */
1096 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1097 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1098 	    SCTP_M_MAP);
1099 	if (asoc->mapping_array == NULL) {
1100 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1101 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1102 		return (ENOMEM);
1103 	}
1104 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1105 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1106 	    SCTP_M_MAP);
1107 	if (asoc->nr_mapping_array == NULL) {
1108 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1109 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1110 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1111 		return (ENOMEM);
1112 	}
1113 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1114 
1115 	/* Now the init of the other outqueues */
1116 	TAILQ_INIT(&asoc->free_chunks);
1117 	TAILQ_INIT(&asoc->control_send_queue);
1118 	TAILQ_INIT(&asoc->asconf_send_queue);
1119 	TAILQ_INIT(&asoc->send_queue);
1120 	TAILQ_INIT(&asoc->sent_queue);
1121 	TAILQ_INIT(&asoc->reasmqueue);
1122 	TAILQ_INIT(&asoc->resetHead);
1123 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1124 	TAILQ_INIT(&asoc->asconf_queue);
1125 	/* authentication fields */
1126 	asoc->authinfo.random = NULL;
1127 	asoc->authinfo.active_keyid = 0;
1128 	asoc->authinfo.assoc_key = NULL;
1129 	asoc->authinfo.assoc_keyid = 0;
1130 	asoc->authinfo.recv_key = NULL;
1131 	asoc->authinfo.recv_keyid = 0;
1132 	LIST_INIT(&asoc->shared_keys);
1133 	asoc->marked_retrans = 0;
1134 	asoc->port = inp->sctp_ep.port;
1135 	asoc->timoinit = 0;
1136 	asoc->timodata = 0;
1137 	asoc->timosack = 0;
1138 	asoc->timoshutdown = 0;
1139 	asoc->timoheartbeat = 0;
1140 	asoc->timocookie = 0;
1141 	asoc->timoshutdownack = 0;
1142 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1143 	asoc->discontinuity_time = asoc->start_time;
1144 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1145 		asoc->abandoned_unsent[i] = 0;
1146 		asoc->abandoned_sent[i] = 0;
1147 	}
1148 	/*
1149 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1150 	 * freed later when the association is freed.
1151 	 */
1152 	return (0);
1153 }
1154 
1155 void
1156 sctp_print_mapping_array(struct sctp_association *asoc)
1157 {
1158 	unsigned int i, limit;
1159 
1160 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1161 	    asoc->mapping_array_size,
1162 	    asoc->mapping_array_base_tsn,
1163 	    asoc->cumulative_tsn,
1164 	    asoc->highest_tsn_inside_map,
1165 	    asoc->highest_tsn_inside_nr_map);
1166 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1167 		if (asoc->mapping_array[limit - 1] != 0) {
1168 			break;
1169 		}
1170 	}
1171 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1172 	for (i = 0; i < limit; i++) {
1173 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1174 	}
1175 	if (limit % 16)
1176 		SCTP_PRINTF("\n");
1177 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1178 		if (asoc->nr_mapping_array[limit - 1]) {
1179 			break;
1180 		}
1181 	}
1182 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1183 	for (i = 0; i < limit; i++) {
1184 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1185 	}
1186 	if (limit % 16)
1187 		SCTP_PRINTF("\n");
1188 }
1189 
1190 int
1191 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1192 {
1193 	/* mapping array needs to grow */
1194 	uint8_t *new_array1, *new_array2;
1195 	uint32_t new_size;
1196 
1197 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1198 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1199 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1200 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1201 		/* can't get more, forget it */
1202 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1203 		if (new_array1) {
1204 			SCTP_FREE(new_array1, SCTP_M_MAP);
1205 		}
1206 		if (new_array2) {
1207 			SCTP_FREE(new_array2, SCTP_M_MAP);
1208 		}
1209 		return (-1);
1210 	}
1211 	memset(new_array1, 0, new_size);
1212 	memset(new_array2, 0, new_size);
1213 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1214 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1215 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1216 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1217 	asoc->mapping_array = new_array1;
1218 	asoc->nr_mapping_array = new_array2;
1219 	asoc->mapping_array_size = new_size;
1220 	return (0);
1221 }
1222 
1223 
/*
 * Core of the iterator mechanism: walk the endpoint list (or a single
 * endpoint when SCTP_ITERATOR_DO_SINGLE_INP is set), and for each
 * endpoint matching it->pcb_flags/it->pcb_features run the per-endpoint
 * callback (function_inp) and then the per-association callback
 * (function_assoc) for every association matching it->asoc_state.
 * When the iteration completes, function_atend is called and "it" is
 * freed.  Runs with the INP-info read lock and the iterator lock held,
 * dropping and reacquiring them periodically to let other threads in.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/* Drop the reference taken when the iterator was queued. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		/* The iterator owns itself; free it on completion. */
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* On the first pass it->inp is already read-locked from above. */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* Advance before unlocking so the list link stays valid. */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Pin the TCB and endpoint with references, drop
			 * every lock, then reacquire them in the same
			 * global order (info, iterator, inp, tcb).
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1371 
/*
 * Drain the queue of pending iterators.  Called (and returns) with the
 * iterator work-queue lock held; the lock is dropped around each call
 * to sctp_iterator_work(), which takes its own locks and frees the
 * iterator when it finishes.  The safe-iteration next pointer is
 * captured before the lock is released, so concurrent enqueues are
 * still picked up on later passes.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		sctp_it_ctl.cur_it = it;
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		/* Run in the iterator's vnet; "it" is freed by the call. */
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		sctp_it_ctl.cur_it = NULL;
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
1395 
1396 
1397 static void
1398 sctp_handle_addr_wq(void)
1399 {
1400 	/* deal with the ADDR wq from the rtsock calls */
1401 	struct sctp_laddr *wi, *nwi;
1402 	struct sctp_asconf_iterator *asc;
1403 
1404 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1405 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1406 	if (asc == NULL) {
1407 		/* Try later, no memory */
1408 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1409 		    (struct sctp_inpcb *)NULL,
1410 		    (struct sctp_tcb *)NULL,
1411 		    (struct sctp_nets *)NULL);
1412 		return;
1413 	}
1414 	LIST_INIT(&asc->list_of_work);
1415 	asc->cnt = 0;
1416 
1417 	SCTP_WQ_ADDR_LOCK();
1418 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1419 		LIST_REMOVE(wi, sctp_nxt_addr);
1420 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1421 		asc->cnt++;
1422 	}
1423 	SCTP_WQ_ADDR_UNLOCK();
1424 
1425 	if (asc->cnt == 0) {
1426 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1427 	} else {
1428 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1429 		    sctp_asconf_iterator_stcb,
1430 		    NULL,	/* No ep end for boundall */
1431 		    SCTP_PCB_FLAGS_BOUNDALL,
1432 		    SCTP_PCB_ANY_FEATURES,
1433 		    SCTP_ASOC_ANY_STATE,
1434 		    (void *)asc, 0,
1435 		    sctp_asconf_iterator_end, NULL, 0);
1436 	}
1437 }
1438 
/*
 * Callout handler for every SCTP timer type.  "t" is the struct
 * sctp_timer embedded in the object (endpoint, association, or net)
 * that scheduled it.  The handler first validates that the timer is
 * still meaningful (not stale, valid type, owning objects not being
 * freed, callout not rescheduled), taking references on the endpoint
 * and association and the TCB lock as needed, then dispatches on
 * tmr->type.  The tmr->stopped_from field is a debugging breadcrumb
 * recording how far through these checks the handler got.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Only the address-WQ timer may legitimately have no endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	/* Remember the type for the final debug print, since the timer
	 * structure may be gone by then (e.g. INPKILL/ASOCKILL paths). */
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * If the socket is gone, only the timers needed to tear
		 * the endpoint/associations down are still serviced.
		 */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the association so it cannot be freed under us. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* Swap the temporary refcount for holding the TCB lock. */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/* Only re-arm if heartbeats are still enabled on this net. */
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret keys. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		/* Guard expired: give up on a graceful shutdown and abort. */
		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* Common exit: release the TCB lock taken above, if still held. */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	/* Drop the endpoint reference taken at entry. */
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1878 
1879 void
1880 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1881     struct sctp_nets *net)
1882 {
1883 	uint32_t to_ticks;
1884 	struct sctp_timer *tmr;
1885 
1886 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1887 		return;
1888 
1889 	tmr = NULL;
1890 	if (stcb) {
1891 		SCTP_TCB_LOCK_ASSERT(stcb);
1892 	}
1893 	switch (t_type) {
1894 	case SCTP_TIMER_TYPE_ZERO_COPY:
1895 		tmr = &inp->sctp_ep.zero_copy_timer;
1896 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1897 		break;
1898 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1899 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1900 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1901 		break;
1902 	case SCTP_TIMER_TYPE_ADDR_WQ:
1903 		/* Only 1 tick away :-) */
1904 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1905 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1906 		break;
1907 	case SCTP_TIMER_TYPE_SEND:
1908 		/* Here we use the RTO timer */
1909 		{
1910 			int rto_val;
1911 
1912 			if ((stcb == NULL) || (net == NULL)) {
1913 				return;
1914 			}
1915 			tmr = &net->rxt_timer;
1916 			if (net->RTO == 0) {
1917 				rto_val = stcb->asoc.initial_rto;
1918 			} else {
1919 				rto_val = net->RTO;
1920 			}
1921 			to_ticks = MSEC_TO_TICKS(rto_val);
1922 		}
1923 		break;
1924 	case SCTP_TIMER_TYPE_INIT:
1925 		/*
1926 		 * Here we use the INIT timer default usually about 1
1927 		 * minute.
1928 		 */
1929 		if ((stcb == NULL) || (net == NULL)) {
1930 			return;
1931 		}
1932 		tmr = &net->rxt_timer;
1933 		if (net->RTO == 0) {
1934 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1935 		} else {
1936 			to_ticks = MSEC_TO_TICKS(net->RTO);
1937 		}
1938 		break;
1939 	case SCTP_TIMER_TYPE_RECV:
1940 		/*
1941 		 * Here we use the Delayed-Ack timer value from the inp
1942 		 * ususually about 200ms.
1943 		 */
1944 		if (stcb == NULL) {
1945 			return;
1946 		}
1947 		tmr = &stcb->asoc.dack_timer;
1948 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1949 		break;
1950 	case SCTP_TIMER_TYPE_SHUTDOWN:
1951 		/* Here we use the RTO of the destination. */
1952 		if ((stcb == NULL) || (net == NULL)) {
1953 			return;
1954 		}
1955 		if (net->RTO == 0) {
1956 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1957 		} else {
1958 			to_ticks = MSEC_TO_TICKS(net->RTO);
1959 		}
1960 		tmr = &net->rxt_timer;
1961 		break;
1962 	case SCTP_TIMER_TYPE_HEARTBEAT:
1963 		/*
1964 		 * the net is used here so that we can add in the RTO. Even
1965 		 * though we use a different timer. We also add the HB timer
1966 		 * PLUS a random jitter.
1967 		 */
1968 		if ((stcb == NULL) || (net == NULL)) {
1969 			return;
1970 		} else {
1971 			uint32_t rndval;
1972 			uint32_t jitter;
1973 
1974 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1975 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1976 				return;
1977 			}
1978 			if (net->RTO == 0) {
1979 				to_ticks = stcb->asoc.initial_rto;
1980 			} else {
1981 				to_ticks = net->RTO;
1982 			}
1983 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1984 			jitter = rndval % to_ticks;
1985 			if (jitter >= (to_ticks >> 1)) {
1986 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1987 			} else {
1988 				to_ticks = to_ticks - jitter;
1989 			}
1990 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1991 			    !(net->dest_state & SCTP_ADDR_PF)) {
1992 				to_ticks += net->heart_beat_delay;
1993 			}
1994 			/*
1995 			 * Now we must convert the to_ticks that are now in
1996 			 * ms to ticks.
1997 			 */
1998 			to_ticks = MSEC_TO_TICKS(to_ticks);
1999 			tmr = &net->hb_timer;
2000 		}
2001 		break;
2002 	case SCTP_TIMER_TYPE_COOKIE:
2003 		/*
2004 		 * Here we can use the RTO timer from the network since one
2005 		 * RTT was compelete. If a retran happened then we will be
2006 		 * using the RTO initial value.
2007 		 */
2008 		if ((stcb == NULL) || (net == NULL)) {
2009 			return;
2010 		}
2011 		if (net->RTO == 0) {
2012 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2013 		} else {
2014 			to_ticks = MSEC_TO_TICKS(net->RTO);
2015 		}
2016 		tmr = &net->rxt_timer;
2017 		break;
2018 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2019 		/*
2020 		 * nothing needed but the endpoint here ususually about 60
2021 		 * minutes.
2022 		 */
2023 		tmr = &inp->sctp_ep.signature_change;
2024 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2025 		break;
2026 	case SCTP_TIMER_TYPE_ASOCKILL:
2027 		if (stcb == NULL) {
2028 			return;
2029 		}
2030 		tmr = &stcb->asoc.strreset_timer;
2031 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2032 		break;
2033 	case SCTP_TIMER_TYPE_INPKILL:
2034 		/*
2035 		 * The inp is setup to die. We re-use the signature_chage
2036 		 * timer since that has stopped and we are in the GONE
2037 		 * state.
2038 		 */
2039 		tmr = &inp->sctp_ep.signature_change;
2040 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2041 		break;
2042 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2043 		/*
2044 		 * Here we use the value found in the EP for PMTU ususually
2045 		 * about 10 minutes.
2046 		 */
2047 		if ((stcb == NULL) || (net == NULL)) {
2048 			return;
2049 		}
2050 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2051 			return;
2052 		}
2053 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2054 		tmr = &net->pmtu_timer;
2055 		break;
2056 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2057 		/* Here we use the RTO of the destination */
2058 		if ((stcb == NULL) || (net == NULL)) {
2059 			return;
2060 		}
2061 		if (net->RTO == 0) {
2062 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2063 		} else {
2064 			to_ticks = MSEC_TO_TICKS(net->RTO);
2065 		}
2066 		tmr = &net->rxt_timer;
2067 		break;
2068 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2069 		/*
2070 		 * Here we use the endpoints shutdown guard timer usually
2071 		 * about 3 minutes.
2072 		 */
2073 		if (stcb == NULL) {
2074 			return;
2075 		}
2076 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2077 		tmr = &stcb->asoc.shut_guard_timer;
2078 		break;
2079 	case SCTP_TIMER_TYPE_STRRESET:
2080 		/*
2081 		 * Here the timer comes from the stcb but its value is from
2082 		 * the net's RTO.
2083 		 */
2084 		if ((stcb == NULL) || (net == NULL)) {
2085 			return;
2086 		}
2087 		if (net->RTO == 0) {
2088 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2089 		} else {
2090 			to_ticks = MSEC_TO_TICKS(net->RTO);
2091 		}
2092 		tmr = &stcb->asoc.strreset_timer;
2093 		break;
2094 	case SCTP_TIMER_TYPE_ASCONF:
2095 		/*
2096 		 * Here the timer comes from the stcb but its value is from
2097 		 * the net's RTO.
2098 		 */
2099 		if ((stcb == NULL) || (net == NULL)) {
2100 			return;
2101 		}
2102 		if (net->RTO == 0) {
2103 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2104 		} else {
2105 			to_ticks = MSEC_TO_TICKS(net->RTO);
2106 		}
2107 		tmr = &stcb->asoc.asconf_timer;
2108 		break;
2109 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2110 		if ((stcb == NULL) || (net != NULL)) {
2111 			return;
2112 		}
2113 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2114 		tmr = &stcb->asoc.delete_prim_timer;
2115 		break;
2116 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2117 		if (stcb == NULL) {
2118 			return;
2119 		}
2120 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2121 			/*
2122 			 * Really an error since stcb is NOT set to
2123 			 * autoclose
2124 			 */
2125 			return;
2126 		}
2127 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2128 		tmr = &stcb->asoc.autoclose_timer;
2129 		break;
2130 	default:
2131 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2132 		    __FUNCTION__, t_type);
2133 		return;
2134 		break;
2135 	}
2136 	if ((to_ticks <= 0) || (tmr == NULL)) {
2137 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2138 		    __FUNCTION__, t_type, to_ticks, (void *)tmr);
2139 		return;
2140 	}
2141 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2142 		/*
2143 		 * we do NOT allow you to have it already running. if it is
2144 		 * we leave the current one up unchanged
2145 		 */
2146 		return;
2147 	}
2148 	/* At this point we can proceed */
2149 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2150 		stcb->asoc.num_send_timers_up++;
2151 	}
2152 	tmr->stopped_from = 0;
2153 	tmr->type = t_type;
2154 	tmr->ep = (void *)inp;
2155 	tmr->tcb = (void *)stcb;
2156 	tmr->net = (void *)net;
2157 	tmr->self = (void *)tmr;
2158 	tmr->vnet = (void *)curvnet;
2159 	tmr->ticks = sctp_get_tick_count();
2160 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2161 	return;
2162 }
2163 
2164 void
2165 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2166     struct sctp_nets *net, uint32_t from)
2167 {
2168 	struct sctp_timer *tmr;
2169 
2170 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2171 	    (inp == NULL))
2172 		return;
2173 
2174 	tmr = NULL;
2175 	if (stcb) {
2176 		SCTP_TCB_LOCK_ASSERT(stcb);
2177 	}
2178 	switch (t_type) {
2179 	case SCTP_TIMER_TYPE_ZERO_COPY:
2180 		tmr = &inp->sctp_ep.zero_copy_timer;
2181 		break;
2182 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2183 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2184 		break;
2185 	case SCTP_TIMER_TYPE_ADDR_WQ:
2186 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2187 		break;
2188 	case SCTP_TIMER_TYPE_SEND:
2189 		if ((stcb == NULL) || (net == NULL)) {
2190 			return;
2191 		}
2192 		tmr = &net->rxt_timer;
2193 		break;
2194 	case SCTP_TIMER_TYPE_INIT:
2195 		if ((stcb == NULL) || (net == NULL)) {
2196 			return;
2197 		}
2198 		tmr = &net->rxt_timer;
2199 		break;
2200 	case SCTP_TIMER_TYPE_RECV:
2201 		if (stcb == NULL) {
2202 			return;
2203 		}
2204 		tmr = &stcb->asoc.dack_timer;
2205 		break;
2206 	case SCTP_TIMER_TYPE_SHUTDOWN:
2207 		if ((stcb == NULL) || (net == NULL)) {
2208 			return;
2209 		}
2210 		tmr = &net->rxt_timer;
2211 		break;
2212 	case SCTP_TIMER_TYPE_HEARTBEAT:
2213 		if ((stcb == NULL) || (net == NULL)) {
2214 			return;
2215 		}
2216 		tmr = &net->hb_timer;
2217 		break;
2218 	case SCTP_TIMER_TYPE_COOKIE:
2219 		if ((stcb == NULL) || (net == NULL)) {
2220 			return;
2221 		}
2222 		tmr = &net->rxt_timer;
2223 		break;
2224 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2225 		/* nothing needed but the endpoint here */
2226 		tmr = &inp->sctp_ep.signature_change;
2227 		/*
2228 		 * We re-use the newcookie timer for the INP kill timer. We
2229 		 * must assure that we do not kill it by accident.
2230 		 */
2231 		break;
2232 	case SCTP_TIMER_TYPE_ASOCKILL:
2233 		/*
2234 		 * Stop the asoc kill timer.
2235 		 */
2236 		if (stcb == NULL) {
2237 			return;
2238 		}
2239 		tmr = &stcb->asoc.strreset_timer;
2240 		break;
2241 
2242 	case SCTP_TIMER_TYPE_INPKILL:
2243 		/*
2244 		 * The inp is setup to die. We re-use the signature_chage
2245 		 * timer since that has stopped and we are in the GONE
2246 		 * state.
2247 		 */
2248 		tmr = &inp->sctp_ep.signature_change;
2249 		break;
2250 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2251 		if ((stcb == NULL) || (net == NULL)) {
2252 			return;
2253 		}
2254 		tmr = &net->pmtu_timer;
2255 		break;
2256 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2257 		if ((stcb == NULL) || (net == NULL)) {
2258 			return;
2259 		}
2260 		tmr = &net->rxt_timer;
2261 		break;
2262 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2263 		if (stcb == NULL) {
2264 			return;
2265 		}
2266 		tmr = &stcb->asoc.shut_guard_timer;
2267 		break;
2268 	case SCTP_TIMER_TYPE_STRRESET:
2269 		if (stcb == NULL) {
2270 			return;
2271 		}
2272 		tmr = &stcb->asoc.strreset_timer;
2273 		break;
2274 	case SCTP_TIMER_TYPE_ASCONF:
2275 		if (stcb == NULL) {
2276 			return;
2277 		}
2278 		tmr = &stcb->asoc.asconf_timer;
2279 		break;
2280 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2281 		if (stcb == NULL) {
2282 			return;
2283 		}
2284 		tmr = &stcb->asoc.delete_prim_timer;
2285 		break;
2286 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2287 		if (stcb == NULL) {
2288 			return;
2289 		}
2290 		tmr = &stcb->asoc.autoclose_timer;
2291 		break;
2292 	default:
2293 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2294 		    __FUNCTION__, t_type);
2295 		break;
2296 	}
2297 	if (tmr == NULL) {
2298 		return;
2299 	}
2300 	if ((tmr->type != t_type) && tmr->type) {
2301 		/*
2302 		 * Ok we have a timer that is under joint use. Cookie timer
2303 		 * per chance with the SEND timer. We therefore are NOT
2304 		 * running the timer that the caller wants stopped.  So just
2305 		 * return.
2306 		 */
2307 		return;
2308 	}
2309 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2310 		stcb->asoc.num_send_timers_up--;
2311 		if (stcb->asoc.num_send_timers_up < 0) {
2312 			stcb->asoc.num_send_timers_up = 0;
2313 		}
2314 	}
2315 	tmr->self = NULL;
2316 	tmr->stopped_from = from;
2317 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2318 	return;
2319 }
2320 
2321 uint32_t
2322 sctp_calculate_len(struct mbuf *m)
2323 {
2324 	uint32_t tlen = 0;
2325 	struct mbuf *at;
2326 
2327 	at = m;
2328 	while (at) {
2329 		tlen += SCTP_BUF_LEN(at);
2330 		at = SCTP_BUF_NEXT(at);
2331 	}
2332 	return (tlen);
2333 }
2334 
2335 void
2336 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2337     struct sctp_association *asoc, uint32_t mtu)
2338 {
2339 	/*
2340 	 * Reset the P-MTU size on this association, this involves changing
2341 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2342 	 * allow the DF flag to be cleared.
2343 	 */
2344 	struct sctp_tmit_chunk *chk;
2345 	unsigned int eff_mtu, ovh;
2346 
2347 	asoc->smallest_mtu = mtu;
2348 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2349 		ovh = SCTP_MIN_OVERHEAD;
2350 	} else {
2351 		ovh = SCTP_MIN_V4_OVERHEAD;
2352 	}
2353 	eff_mtu = mtu - ovh;
2354 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2355 		if (chk->send_size > eff_mtu) {
2356 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2357 		}
2358 	}
2359 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2360 		if (chk->send_size > eff_mtu) {
2361 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2362 		}
2363 	}
2364 }
2365 
2366 
2367 /*
2368  * given an association and starting time of the current RTT period return
2369  * RTO in number of msecs net should point to the current network
2370  */
2371 
/*
 * Fold the RTT sample that started at *told into net's smoothed RTT
 * estimates (Van Jacobson algorithm) and return the resulting RTO in
 * msecs, clamped to [minrto, maxrto].  'safe' tells whether *told must
 * be copied before use (alignment-unsafe callers, e.g. on sparc64);
 * rtt_from_sack distinguishes data-based samples (SCTP_RTT_FROM_DATA)
 * from non-data ones (HB / INIT->INIT-ACK).  Returns 0 on a bad
 * 'safe' argument.
 */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe, int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* now = elapsed time since the sample period started */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
	        (uint64_t) now.tv_usec;

	/* compute rtt in ms */
	rtt = (int32_t) (net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* rtt becomes the error term: sample minus current srtt */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		/* |error| minus current rttvar feeds the variance estimate */
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	if (net->lastsv == 0) {
		/* never let the variance estimate collapse to zero */
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		/* long-delay (satellite-like) path detected */
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		/* path sped back up; latch out of satellite mode for good */
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2487 
2488 /*
2489  * return a pointer to a contiguous piece of data from the given mbuf chain
2490  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2491  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2492  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2493  */
2494 caddr_t
2495 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2496 {
2497 	uint32_t count;
2498 	uint8_t *ptr;
2499 
2500 	ptr = in_ptr;
2501 	if ((off < 0) || (len <= 0))
2502 		return (NULL);
2503 
2504 	/* find the desired start location */
2505 	while ((m != NULL) && (off > 0)) {
2506 		if (off < SCTP_BUF_LEN(m))
2507 			break;
2508 		off -= SCTP_BUF_LEN(m);
2509 		m = SCTP_BUF_NEXT(m);
2510 	}
2511 	if (m == NULL)
2512 		return (NULL);
2513 
2514 	/* is the current mbuf large enough (eg. contiguous)? */
2515 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2516 		return (mtod(m, caddr_t)+off);
2517 	} else {
2518 		/* else, it spans more than one mbuf, so save a temp copy... */
2519 		while ((m != NULL) && (len > 0)) {
2520 			count = min(SCTP_BUF_LEN(m) - off, len);
2521 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2522 			len -= count;
2523 			ptr += count;
2524 			off = 0;
2525 			m = SCTP_BUF_NEXT(m);
2526 		}
2527 		if ((m == NULL) && (len > 0))
2528 			return (NULL);
2529 		else
2530 			return ((caddr_t)in_ptr);
2531 	}
2532 }
2533 
2534 
2535 
2536 struct sctp_paramhdr *
2537 sctp_get_next_param(struct mbuf *m,
2538     int offset,
2539     struct sctp_paramhdr *pull,
2540     int pull_limit)
2541 {
2542 	/* This just provides a typed signature to Peter's Pull routine */
2543 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2544 	    (uint8_t *) pull));
2545 }
2546 
2547 
2548 struct mbuf *
2549 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2550 {
2551 	struct mbuf *m_last;
2552 	caddr_t dp;
2553 
2554 	if (padlen > 3) {
2555 		return (NULL);
2556 	}
2557 	if (padlen <= M_TRAILINGSPACE(m)) {
2558 		/*
2559 		 * The easy way. We hope the majority of the time we hit
2560 		 * here :)
2561 		 */
2562 		m_last = m;
2563 	} else {
2564 		/* Hard way we must grow the mbuf chain */
2565 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2566 		if (m_last == NULL) {
2567 			return (NULL);
2568 		}
2569 		SCTP_BUF_LEN(m_last) = 0;
2570 		SCTP_BUF_NEXT(m_last) = NULL;
2571 		SCTP_BUF_NEXT(m) = m_last;
2572 	}
2573 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2574 	SCTP_BUF_LEN(m_last) += padlen;
2575 	memset(dp, 0, padlen);
2576 	return (m_last);
2577 }
2578 
2579 struct mbuf *
2580 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2581 {
2582 	/* find the last mbuf in chain and pad it */
2583 	struct mbuf *m_at;
2584 
2585 	if (last_mbuf != NULL) {
2586 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2587 	} else {
2588 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2589 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2590 				return (sctp_add_pad_tombuf(m_at, padval));
2591 			}
2592 		}
2593 	}
2594 	return (NULL);
2595 }
2596 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification for this association (if
 * the application enabled SCTP_PCB_FLAGS_RECVASSOCEVNT) and, for
 * 1-to-1 style sockets that just lost the association, set the socket
 * error and wake any sleepers.  'abort', if non-NULL, is the peer's
 * ABORT chunk, appended to the notification on COMM_LOST /
 * CANT_STR_ASSOC.  'from_peer' selects the error reported to the
 * socket (peer-initiated vs. local failure).
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	size_t notif_len, abort_len;
	unsigned int i;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
		} else {
			abort_len = 0;
		}
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			/* room for the supported-features list */
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			/* room for a copy of the peer's ABORT chunk */
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				/* notification dropped; still report the error below */
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				/* append one byte per supported feature */
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			/* not that we need this */
			control->tail_mbuf = m_notify;
			control->spec_flags = M_NOTIFICATION;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				/* peer rejected our INIT */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				/* we gave up during association setup */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* hold a ref across the unlock/relock to keep stcb alive */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore_locked(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2744 
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification that peer address 'sa'
 * has moved to 'state' (with the given error), provided the
 * application enabled SCTP_PCB_FLAGS_RECVPADDREVNT.  Best effort:
 * allocation failures silently drop the notification.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* copy the address into the notification, family-specific */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		/* present v4 addresses as v4-mapped v6 if requested */
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    SCTP_SO_NOT_LOCKED);
}
2832 
2833 
2834 static void
2835 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2836     struct sctp_tmit_chunk *chk, int so_locked
2837 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2838     SCTP_UNUSED
2839 #endif
2840 )
2841 {
2842 	struct mbuf *m_notify;
2843 	struct sctp_send_failed *ssf;
2844 	struct sctp_send_failed_event *ssfe;
2845 	struct sctp_queued_to_read *control;
2846 	int length;
2847 
2848 	if ((stcb == NULL) ||
2849 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2850 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2851 		/* event not enabled */
2852 		return;
2853 	}
2854 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2855 		length = sizeof(struct sctp_send_failed_event);
2856 	} else {
2857 		length = sizeof(struct sctp_send_failed);
2858 	}
2859 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
2860 	if (m_notify == NULL)
2861 		/* no space left */
2862 		return;
2863 	SCTP_BUF_LEN(m_notify) = 0;
2864 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2865 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2866 		memset(ssfe, 0, length);
2867 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2868 		if (sent) {
2869 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2870 		} else {
2871 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2872 		}
2873 		length += chk->send_size;
2874 		length -= sizeof(struct sctp_data_chunk);
2875 		ssfe->ssfe_length = length;
2876 		ssfe->ssfe_error = error;
2877 		/* not exactly what the user sent in, but should be close :) */
2878 		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
2879 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2880 		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
2881 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2882 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2883 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2884 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
2885 	} else {
2886 		ssf = mtod(m_notify, struct sctp_send_failed *);
2887 		memset(ssf, 0, length);
2888 		ssf->ssf_type = SCTP_SEND_FAILED;
2889 		if (sent) {
2890 			ssf->ssf_flags = SCTP_DATA_SENT;
2891 		} else {
2892 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2893 		}
2894 		length += chk->send_size;
2895 		length -= sizeof(struct sctp_data_chunk);
2896 		ssf->ssf_length = length;
2897 		ssf->ssf_error = error;
2898 		/* not exactly what the user sent in, but should be close :) */
2899 		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
2900 		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2901 		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2902 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2903 		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2904 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
2905 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2906 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
2907 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2908 	}
2909 	if (chk->data) {
2910 		/*
2911 		 * trim off the sctp chunk header(it should be there)
2912 		 */
2913 		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
2914 			m_adj(chk->data, sizeof(struct sctp_data_chunk));
2915 			sctp_mbuf_crush(chk->data);
2916 			chk->send_size -= sizeof(struct sctp_data_chunk);
2917 		}
2918 	}
2919 	SCTP_BUF_NEXT(m_notify) = chk->data;
2920 	/* Steal off the mbuf */
2921 	chk->data = NULL;
2922 	/*
2923 	 * For this case, we check the actual socket buffer, since the assoc
2924 	 * is going away we don't want to overfill the socket buffer for a
2925 	 * non-reader
2926 	 */
2927 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
2928 		sctp_m_freem(m_notify);
2929 		return;
2930 	}
2931 	/* append to socket */
2932 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2933 	    0, 0, stcb->asoc.context, 0, 0, 0,
2934 	    m_notify);
2935 	if (control == NULL) {
2936 		/* no memory */
2937 		sctp_m_freem(m_notify);
2938 		return;
2939 	}
2940 	control->spec_flags = M_NOTIFICATION;
2941 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2942 	    control,
2943 	    &stcb->sctp_socket->so_rcv, 1,
2944 	    SCTP_READ_LOCK_NOT_HELD,
2945 	    so_locked);
2946 }
2947 
2948 
/*
 * Queue an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification for a
 * message that never left the stream send queue (it is still a
 * sctp_stream_queue_pending, i.e. it was never chunked).  The pending
 * payload is stolen from 'sp' and chained behind the notification header
 * so the application gets its unsent data back.
 *
 * stcb      - association the failure belongs to (may be NULL; checked).
 * error     - error cause placed in the notification.
 * sp        - pending stream-queue entry whose data failed to send.
 * so_locked - whether the caller already holds the socket lock (only
 *             meaningful on platforms that need it; SCTP_UNUSED otherwise).
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	/* Bail unless at least one of the two send-failed events is on. */
	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* The newer (RFC 6458) event format takes precedence if subscribed. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* Build the sctp_send_failed_event variant. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		/* Reported length covers the header plus the unsent data. */
		length += sp->length;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->stream;
		if (sp->some_taken) {
			/* Part of the message was already chunked off. */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		/* Build the deprecated sctp_send_failed variant. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		/* Reported length covers the header plus the unsent data. */
		length += sp->length;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* Dropping the chain frees the stolen payload too. */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3049 
3050 
3051 
3052 static void
3053 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3054 {
3055 	struct mbuf *m_notify;
3056 	struct sctp_adaptation_event *sai;
3057 	struct sctp_queued_to_read *control;
3058 
3059 	if ((stcb == NULL) ||
3060 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3061 		/* event not enabled */
3062 		return;
3063 	}
3064 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3065 	if (m_notify == NULL)
3066 		/* no space left */
3067 		return;
3068 	SCTP_BUF_LEN(m_notify) = 0;
3069 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3070 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3071 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3072 	sai->sai_flags = 0;
3073 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3074 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3075 	sai->sai_assoc_id = sctp_get_associd(stcb);
3076 
3077 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3078 	SCTP_BUF_NEXT(m_notify) = NULL;
3079 
3080 	/* append to socket */
3081 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3082 	    0, 0, stcb->asoc.context, 0, 0, 0,
3083 	    m_notify);
3084 	if (control == NULL) {
3085 		/* no memory */
3086 		sctp_m_freem(m_notify);
3087 		return;
3088 	}
3089 	control->length = SCTP_BUF_LEN(m_notify);
3090 	control->spec_flags = M_NOTIFICATION;
3091 	/* not that we need this */
3092 	control->tail_mbuf = m_notify;
3093 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3094 	    control,
3095 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3096 }
3097 
/* This always must be called with the read-queue LOCKED in the INP */
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT notification.  'val' packs the
 * stream id in the upper 16 bits and the sequence number in the lower
 * 16 bits.  Unlike most notify helpers this one inserts the read-queue
 * entry and charges the socket buffer by hand (instead of calling
 * sctp_add_to_readq), so that the entry can be placed directly after
 * the in-progress partial-delivery entry (asoc.control_pdapi).
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Receive side is shut down; nobody can read the event. */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* Unpack stream/seq from the caller-encoded 'val'. */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	/*
	 * NOTE(review): this first length assignment is dead — length is
	 * reset to 0 a few lines below and then rebuilt via atomic_add_int.
	 */
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* Charge the notification against the socket buffer. */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	/* Insert right after the partially delivered message, if any. */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/* Take the socket lock (dropping the TCB lock around it). */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* Socket vanished while we juggled locks. */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3193 
/*
 * Queue an SCTP_SHUTDOWN_EVENT notification when the peer has initiated
 * a graceful shutdown.  For 1-to-1 style (TCP-model) and peeled-off
 * sockets the send side of the socket is additionally marked closed so
 * further writes fail.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/* Acquire the socket lock, dropping the TCB lock around it. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* Socket was closed while we juggled locks. */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3262 
3263 static void
3264 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3265     int so_locked
3266 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3267     SCTP_UNUSED
3268 #endif
3269 )
3270 {
3271 	struct mbuf *m_notify;
3272 	struct sctp_sender_dry_event *event;
3273 	struct sctp_queued_to_read *control;
3274 
3275 	if ((stcb == NULL) ||
3276 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3277 		/* event not enabled */
3278 		return;
3279 	}
3280 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3281 	if (m_notify == NULL) {
3282 		/* no space left */
3283 		return;
3284 	}
3285 	SCTP_BUF_LEN(m_notify) = 0;
3286 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3287 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3288 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3289 	event->sender_dry_flags = 0;
3290 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3291 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3292 
3293 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3294 	SCTP_BUF_NEXT(m_notify) = NULL;
3295 
3296 	/* append to socket */
3297 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3298 	    0, 0, stcb->asoc.context, 0, 0, 0,
3299 	    m_notify);
3300 	if (control == NULL) {
3301 		/* no memory */
3302 		sctp_m_freem(m_notify);
3303 		return;
3304 	}
3305 	control->length = SCTP_BUF_LEN(m_notify);
3306 	control->spec_flags = M_NOTIFICATION;
3307 	/* not that we need this */
3308 	control->tail_mbuf = m_notify;
3309 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3310 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3311 }
3312 
3313 
3314 void
3315 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3316 {
3317 	struct mbuf *m_notify;
3318 	struct sctp_queued_to_read *control;
3319 	struct sctp_stream_change_event *stradd;
3320 
3321 	if ((stcb == NULL) ||
3322 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3323 		/* event not enabled */
3324 		return;
3325 	}
3326 	if ((stcb->asoc.peer_req_out) && flag) {
3327 		/* Peer made the request, don't tell the local user */
3328 		stcb->asoc.peer_req_out = 0;
3329 		return;
3330 	}
3331 	stcb->asoc.peer_req_out = 0;
3332 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3333 	if (m_notify == NULL)
3334 		/* no space left */
3335 		return;
3336 	SCTP_BUF_LEN(m_notify) = 0;
3337 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3338 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3339 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3340 	stradd->strchange_flags = flag;
3341 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3342 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3343 	stradd->strchange_instrms = numberin;
3344 	stradd->strchange_outstrms = numberout;
3345 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3346 	SCTP_BUF_NEXT(m_notify) = NULL;
3347 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3348 		/* no space */
3349 		sctp_m_freem(m_notify);
3350 		return;
3351 	}
3352 	/* append to socket */
3353 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3354 	    0, 0, stcb->asoc.context, 0, 0, 0,
3355 	    m_notify);
3356 	if (control == NULL) {
3357 		/* no memory */
3358 		sctp_m_freem(m_notify);
3359 		return;
3360 	}
3361 	control->spec_flags = M_NOTIFICATION;
3362 	control->length = SCTP_BUF_LEN(m_notify);
3363 	/* not that we need this */
3364 	control->tail_mbuf = m_notify;
3365 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3366 	    control,
3367 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3368 }
3369 
3370 void
3371 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3372 {
3373 	struct mbuf *m_notify;
3374 	struct sctp_queued_to_read *control;
3375 	struct sctp_assoc_reset_event *strasoc;
3376 
3377 	if ((stcb == NULL) ||
3378 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3379 		/* event not enabled */
3380 		return;
3381 	}
3382 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3383 	if (m_notify == NULL)
3384 		/* no space left */
3385 		return;
3386 	SCTP_BUF_LEN(m_notify) = 0;
3387 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3388 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3389 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3390 	strasoc->assocreset_flags = flag;
3391 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3392 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3393 	strasoc->assocreset_local_tsn = sending_tsn;
3394 	strasoc->assocreset_remote_tsn = recv_tsn;
3395 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3396 	SCTP_BUF_NEXT(m_notify) = NULL;
3397 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3398 		/* no space */
3399 		sctp_m_freem(m_notify);
3400 		return;
3401 	}
3402 	/* append to socket */
3403 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3404 	    0, 0, stcb->asoc.context, 0, 0, 0,
3405 	    m_notify);
3406 	if (control == NULL) {
3407 		/* no memory */
3408 		sctp_m_freem(m_notify);
3409 		return;
3410 	}
3411 	control->spec_flags = M_NOTIFICATION;
3412 	control->length = SCTP_BUF_LEN(m_notify);
3413 	/* not that we need this */
3414 	control->tail_mbuf = m_notify;
3415 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3416 	    control,
3417 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3418 }
3419 
3420 
3421 
3422 static void
3423 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3424     int number_entries, uint16_t * list, int flag)
3425 {
3426 	struct mbuf *m_notify;
3427 	struct sctp_queued_to_read *control;
3428 	struct sctp_stream_reset_event *strreset;
3429 	int len;
3430 
3431 	if ((stcb == NULL) ||
3432 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3433 		/* event not enabled */
3434 		return;
3435 	}
3436 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3437 	if (m_notify == NULL)
3438 		/* no space left */
3439 		return;
3440 	SCTP_BUF_LEN(m_notify) = 0;
3441 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3442 	if (len > M_TRAILINGSPACE(m_notify)) {
3443 		/* never enough room */
3444 		sctp_m_freem(m_notify);
3445 		return;
3446 	}
3447 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3448 	memset(strreset, 0, len);
3449 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3450 	strreset->strreset_flags = flag;
3451 	strreset->strreset_length = len;
3452 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3453 	if (number_entries) {
3454 		int i;
3455 
3456 		for (i = 0; i < number_entries; i++) {
3457 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3458 		}
3459 	}
3460 	SCTP_BUF_LEN(m_notify) = len;
3461 	SCTP_BUF_NEXT(m_notify) = NULL;
3462 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3463 		/* no space */
3464 		sctp_m_freem(m_notify);
3465 		return;
3466 	}
3467 	/* append to socket */
3468 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3469 	    0, 0, stcb->asoc.context, 0, 0, 0,
3470 	    m_notify);
3471 	if (control == NULL) {
3472 		/* no memory */
3473 		sctp_m_freem(m_notify);
3474 		return;
3475 	}
3476 	control->spec_flags = M_NOTIFICATION;
3477 	control->length = SCTP_BUF_LEN(m_notify);
3478 	/* not that we need this */
3479 	control->tail_mbuf = m_notify;
3480 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3481 	    control,
3482 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3483 }
3484 
3485 
3486 static void
3487 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3488 {
3489 	struct mbuf *m_notify;
3490 	struct sctp_remote_error *sre;
3491 	struct sctp_queued_to_read *control;
3492 	size_t notif_len, chunk_len;
3493 
3494 	if ((stcb == NULL) ||
3495 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3496 		return;
3497 	}
3498 	if (chunk != NULL) {
3499 		chunk_len = ntohs(chunk->ch.chunk_length);
3500 	} else {
3501 		chunk_len = 0;
3502 	}
3503 	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
3504 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3505 	if (m_notify == NULL) {
3506 		/* Retry with smaller value. */
3507 		notif_len = sizeof(struct sctp_remote_error);
3508 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3509 		if (m_notify == NULL) {
3510 			return;
3511 		}
3512 	}
3513 	SCTP_BUF_NEXT(m_notify) = NULL;
3514 	sre = mtod(m_notify, struct sctp_remote_error *);
3515 	memset(sre, 0, notif_len);
3516 	sre->sre_type = SCTP_REMOTE_ERROR;
3517 	sre->sre_flags = 0;
3518 	sre->sre_length = sizeof(struct sctp_remote_error);
3519 	sre->sre_error = error;
3520 	sre->sre_assoc_id = sctp_get_associd(stcb);
3521 	if (notif_len > sizeof(struct sctp_remote_error)) {
3522 		memcpy(sre->sre_data, chunk, chunk_len);
3523 		sre->sre_length += chunk_len;
3524 	}
3525 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3526 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3527 	    0, 0, stcb->asoc.context, 0, 0, 0,
3528 	    m_notify);
3529 	if (control != NULL) {
3530 		control->length = SCTP_BUF_LEN(m_notify);
3531 		/* not that we need this */
3532 		control->tail_mbuf = m_notify;
3533 		control->spec_flags = M_NOTIFICATION;
3534 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3535 		    control,
3536 		    &stcb->sctp_socket->so_rcv, 1,
3537 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3538 	} else {
3539 		sctp_m_freem(m_notify);
3540 	}
3541 }
3542 
3543 
3544 void
3545 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3546     uint32_t error, void *data, int so_locked
3547 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3548     SCTP_UNUSED
3549 #endif
3550 )
3551 {
3552 	if ((stcb == NULL) ||
3553 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3554 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3555 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3556 		/* If the socket is gone we are out of here */
3557 		return;
3558 	}
3559 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3560 		return;
3561 	}
3562 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3563 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3564 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3565 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3566 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3567 			/* Don't report these in front states */
3568 			return;
3569 		}
3570 	}
3571 	switch (notification) {
3572 	case SCTP_NOTIFY_ASSOC_UP:
3573 		if (stcb->asoc.assoc_up_sent == 0) {
3574 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3575 			stcb->asoc.assoc_up_sent = 1;
3576 		}
3577 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3578 			sctp_notify_adaptation_layer(stcb);
3579 		}
3580 		if (stcb->asoc.auth_supported == 0) {
3581 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3582 			    NULL, so_locked);
3583 		}
3584 		break;
3585 	case SCTP_NOTIFY_ASSOC_DOWN:
3586 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3587 		break;
3588 	case SCTP_NOTIFY_INTERFACE_DOWN:
3589 		{
3590 			struct sctp_nets *net;
3591 
3592 			net = (struct sctp_nets *)data;
3593 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3594 			    (struct sockaddr *)&net->ro._l_addr, error);
3595 			break;
3596 		}
3597 	case SCTP_NOTIFY_INTERFACE_UP:
3598 		{
3599 			struct sctp_nets *net;
3600 
3601 			net = (struct sctp_nets *)data;
3602 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3603 			    (struct sockaddr *)&net->ro._l_addr, error);
3604 			break;
3605 		}
3606 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3607 		{
3608 			struct sctp_nets *net;
3609 
3610 			net = (struct sctp_nets *)data;
3611 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3612 			    (struct sockaddr *)&net->ro._l_addr, error);
3613 			break;
3614 		}
3615 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3616 		sctp_notify_send_failed2(stcb, error,
3617 		    (struct sctp_stream_queue_pending *)data, so_locked);
3618 		break;
3619 	case SCTP_NOTIFY_SENT_DG_FAIL:
3620 		sctp_notify_send_failed(stcb, 1, error,
3621 		    (struct sctp_tmit_chunk *)data, so_locked);
3622 		break;
3623 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3624 		sctp_notify_send_failed(stcb, 0, error,
3625 		    (struct sctp_tmit_chunk *)data, so_locked);
3626 		break;
3627 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3628 		{
3629 			uint32_t val;
3630 
3631 			val = *((uint32_t *) data);
3632 
3633 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3634 			break;
3635 		}
3636 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3637 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3638 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3639 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3640 		} else {
3641 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3642 		}
3643 		break;
3644 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3645 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3646 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3647 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3648 		} else {
3649 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3650 		}
3651 		break;
3652 	case SCTP_NOTIFY_ASSOC_RESTART:
3653 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3654 		if (stcb->asoc.auth_supported == 0) {
3655 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3656 			    NULL, so_locked);
3657 		}
3658 		break;
3659 	case SCTP_NOTIFY_STR_RESET_SEND:
3660 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3661 		break;
3662 	case SCTP_NOTIFY_STR_RESET_RECV:
3663 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3664 		break;
3665 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3666 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3667 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3668 		break;
3669 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3670 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3671 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3672 		break;
3673 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3674 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3675 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3676 		break;
3677 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3678 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3679 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3680 		break;
3681 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3682 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3683 		    error);
3684 		break;
3685 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3686 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3687 		    error);
3688 		break;
3689 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3690 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3691 		    error);
3692 		break;
3693 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3694 		sctp_notify_shutdown_event(stcb);
3695 		break;
3696 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3697 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3698 		    (uint16_t) (uintptr_t) data,
3699 		    so_locked);
3700 		break;
3701 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3702 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3703 		    (uint16_t) (uintptr_t) data,
3704 		    so_locked);
3705 		break;
3706 	case SCTP_NOTIFY_NO_PEER_AUTH:
3707 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3708 		    (uint16_t) (uintptr_t) data,
3709 		    so_locked);
3710 		break;
3711 	case SCTP_NOTIFY_SENDER_DRY:
3712 		sctp_notify_sender_dry_event(stcb, so_locked);
3713 		break;
3714 	case SCTP_NOTIFY_REMOTE_ERROR:
3715 		sctp_notify_remote_error(stcb, error, data);
3716 		break;
3717 	default:
3718 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3719 		    __FUNCTION__, notification, notification);
3720 		break;
3721 	}			/* end switch */
3722 }
3723 
/*
 * Walk every outbound queue of the association (sent queue, send queue,
 * and each stream's pending-message queue) and fail every entry back to
 * the ULP via SCTP_NOTIFY_*_DG_FAIL / SCTP_NOTIFY_SPECIAL_SP_FAIL,
 * freeing the data and bookkeeping as it goes.  Used when the
 * association is being torn down (e.g. on abort).
 *
 * error      - error cause passed through to the notifications.
 * holds_lock - non-zero when the caller already owns the TCB send lock.
 * so_locked  - whether the caller holds the socket lock (platforms that
 *              need it only; SCTP_UNUSED otherwise).
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		/* Socket is gone; there is nobody to notify. */
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* Keep the per-stream queued-chunk count honest. */
			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* Chunk was sent at least once: SENT_DG_FAIL. */
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* The notify may have stolen chk->data; re-check. */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* Chunk was never sent: UNSENT_DG_FAIL. */
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* The notify may have stolen chk->data; re-check. */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				/* Message never left the stream queue. */
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* The notify may have stolen sp->data. */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3835 
3836 void
3837 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3838     struct sctp_abort_chunk *abort, int so_locked
3839 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3840     SCTP_UNUSED
3841 #endif
3842 )
3843 {
3844 	if (stcb == NULL) {
3845 		return;
3846 	}
3847 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3848 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3849 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3850 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3851 	}
3852 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3853 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3854 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3855 		return;
3856 	}
3857 	/* Tell them we lost the asoc */
3858 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3859 	if (from_peer) {
3860 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3861 	} else {
3862 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3863 	}
3864 }
3865 
/*
 * Abort an association in response to an inbound packet: notify the ULP
 * (if a TCB exists), send an ABORT back using the packet's addressing
 * (src/dst/sh), and free the TCB.  If stcb is NULL only the ABORT is
 * sent, using vtag 0.  op_err, if non-NULL, supplies the error cause(s)
 * handed to sctp_send_abort().
 *
 * NOTE(review): on the __APPLE__/SCTP_SO_LOCK_TESTING path the caller is
 * assumed to hold the TCB lock when stcb != NULL (it is dropped and
 * re-taken around the socket lock below) -- confirm at call sites.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: hold a reference so the assoc cannot
		 * vanish, drop the TCB lock, take the socket lock, then
		 * re-take the TCB lock and drop the reference.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* An established association is going away: fix the gauge. */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3914 
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Dump the per-association circular TSN logs (inbound then outbound) via
 * SCTP_PRINTF for debugging.  Each log is a ring of SCTP_TSN_LOG_SIZE
 * entries; tsn_in_at/tsn_out_at is the next write slot and *_wrapped says
 * the ring has lapped, in which case the oldest entries (from the write
 * slot to the end) are printed before the newest (from 0 to the slot).
 *
 * NOTE(review): the body is guarded by "NOSIY_PRINTS", which looks like a
 * misspelling of "NOISY_PRINTS"; as written the function compiles to a
 * no-op unless that exact macro is defined -- confirm intent.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* Ring wrapped: print the older half first. */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		/* Then the newer entries up to the write slot. */
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		/* Same wrapped-ring printing order as the inbound log. */
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3978 
/*
 * Abort an existing association from the local side: notify the ULP
 * (unless the socket is already gone), send an ABORT chunk to the peer
 * via sctp_send_abort_tcb(), update statistics, and free the TCB.  When
 * stcb is NULL only finish tearing down an endpoint whose socket is gone
 * and whose association list is empty.  op_err is handed to
 * sctp_send_abort_tcb() as the ABORT's error cause(s).
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* An established association is going away: fix the gauge. */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: hold a reference, drop the TCB lock, take the
	 * socket lock, re-take the TCB lock, drop the reference.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4039 
4040 void
4041 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4042     struct sockaddr *src, struct sockaddr *dst,
4043     struct sctphdr *sh, struct sctp_inpcb *inp,
4044     struct mbuf *cause,
4045     uint8_t mflowtype, uint32_t mflowid,
4046     uint32_t vrf_id, uint16_t port)
4047 {
4048 	struct sctp_chunkhdr *ch, chunk_buf;
4049 	unsigned int chk_length;
4050 	int contains_init_chunk;
4051 
4052 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4053 	/* Generate a TO address for future reference */
4054 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4055 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4056 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4057 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4058 		}
4059 	}
4060 	contains_init_chunk = 0;
4061 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4062 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4063 	while (ch != NULL) {
4064 		chk_length = ntohs(ch->chunk_length);
4065 		if (chk_length < sizeof(*ch)) {
4066 			/* break to abort land */
4067 			break;
4068 		}
4069 		switch (ch->chunk_type) {
4070 		case SCTP_INIT:
4071 			contains_init_chunk = 1;
4072 			break;
4073 		case SCTP_PACKET_DROPPED:
4074 			/* we don't respond to pkt-dropped */
4075 			return;
4076 		case SCTP_ABORT_ASSOCIATION:
4077 			/* we don't respond with an ABORT to an ABORT */
4078 			return;
4079 		case SCTP_SHUTDOWN_COMPLETE:
4080 			/*
4081 			 * we ignore it since we are not waiting for it and
4082 			 * peer is gone
4083 			 */
4084 			return;
4085 		case SCTP_SHUTDOWN_ACK:
4086 			sctp_send_shutdown_complete2(src, dst, sh,
4087 			    mflowtype, mflowid,
4088 			    vrf_id, port);
4089 			return;
4090 		default:
4091 			break;
4092 		}
4093 		offset += SCTP_SIZE32(chk_length);
4094 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4095 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4096 	}
4097 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4098 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4099 	    (contains_init_chunk == 0))) {
4100 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4101 		    mflowtype, mflowid,
4102 		    vrf_id, port);
4103 	}
4104 }
4105 
4106 /*
4107  * check the inbound datagram to make sure there is not an abort inside it,
4108  * if there is return 1, else return 0.
4109  */
4110 int
4111 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4112 {
4113 	struct sctp_chunkhdr *ch;
4114 	struct sctp_init_chunk *init_chk, chunk_buf;
4115 	int offset;
4116 	unsigned int chk_length;
4117 
4118 	offset = iphlen + sizeof(struct sctphdr);
4119 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4120 	    (uint8_t *) & chunk_buf);
4121 	while (ch != NULL) {
4122 		chk_length = ntohs(ch->chunk_length);
4123 		if (chk_length < sizeof(*ch)) {
4124 			/* packet is probably corrupt */
4125 			break;
4126 		}
4127 		/* we seem to be ok, is it an abort? */
4128 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4129 			/* yep, tell them */
4130 			return (1);
4131 		}
4132 		if (ch->chunk_type == SCTP_INITIATION) {
4133 			/* need to update the Vtag */
4134 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4135 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4136 			if (init_chk != NULL) {
4137 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4138 			}
4139 		}
4140 		/* Nope, move to the next chunk */
4141 		offset += SCTP_SIZE32(chk_length);
4142 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4143 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4144 	}
4145 	return (0);
4146 }
4147 
4148 /*
4149  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4150  * set (i.e. it's 0) so, create this function to compare link local scopes
4151  */
4152 #ifdef INET6
4153 uint32_t
4154 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4155 {
4156 	struct sockaddr_in6 a, b;
4157 
4158 	/* save copies */
4159 	a = *addr1;
4160 	b = *addr2;
4161 
4162 	if (a.sin6_scope_id == 0)
4163 		if (sa6_recoverscope(&a)) {
4164 			/* can't get scope, so can't match */
4165 			return (0);
4166 		}
4167 	if (b.sin6_scope_id == 0)
4168 		if (sa6_recoverscope(&b)) {
4169 			/* can't get scope, so can't match */
4170 			return (0);
4171 		}
4172 	if (a.sin6_scope_id != b.sin6_scope_id)
4173 		return (0);
4174 
4175 	return (1);
4176 }
4177 
4178 /*
4179  * returns a sockaddr_in6 with embedded scope recovered and removed
4180  */
4181 struct sockaddr_in6 *
4182 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4183 {
4184 	/* check and strip embedded scope junk */
4185 	if (addr->sin6_family == AF_INET6) {
4186 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4187 			if (addr->sin6_scope_id == 0) {
4188 				*store = *addr;
4189 				if (!sa6_recoverscope(store)) {
4190 					/* use the recovered scope */
4191 					addr = store;
4192 				}
4193 			} else {
4194 				/* else, return the original "to" addr */
4195 				in6_clearscope(&addr->sin6_addr);
4196 			}
4197 		}
4198 	}
4199 	return (addr);
4200 }
4201 
4202 #endif
4203 
4204 /*
4205  * are the two addresses the same?  currently a "scopeless" check returns: 1
4206  * if same, 0 if not
4207  */
4208 int
4209 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4210 {
4211 
4212 	/* must be valid */
4213 	if (sa1 == NULL || sa2 == NULL)
4214 		return (0);
4215 
4216 	/* must be the same family */
4217 	if (sa1->sa_family != sa2->sa_family)
4218 		return (0);
4219 
4220 	switch (sa1->sa_family) {
4221 #ifdef INET6
4222 	case AF_INET6:
4223 		{
4224 			/* IPv6 addresses */
4225 			struct sockaddr_in6 *sin6_1, *sin6_2;
4226 
4227 			sin6_1 = (struct sockaddr_in6 *)sa1;
4228 			sin6_2 = (struct sockaddr_in6 *)sa2;
4229 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4230 			    sin6_2));
4231 		}
4232 #endif
4233 #ifdef INET
4234 	case AF_INET:
4235 		{
4236 			/* IPv4 addresses */
4237 			struct sockaddr_in *sin_1, *sin_2;
4238 
4239 			sin_1 = (struct sockaddr_in *)sa1;
4240 			sin_2 = (struct sockaddr_in *)sa2;
4241 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4242 		}
4243 #endif
4244 	default:
4245 		/* we don't do these... */
4246 		return (0);
4247 	}
4248 }
4249 
4250 void
4251 sctp_print_address(struct sockaddr *sa)
4252 {
4253 #ifdef INET6
4254 	char ip6buf[INET6_ADDRSTRLEN];
4255 
4256 #endif
4257 
4258 	switch (sa->sa_family) {
4259 #ifdef INET6
4260 	case AF_INET6:
4261 		{
4262 			struct sockaddr_in6 *sin6;
4263 
4264 			sin6 = (struct sockaddr_in6 *)sa;
4265 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4266 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4267 			    ntohs(sin6->sin6_port),
4268 			    sin6->sin6_scope_id);
4269 			break;
4270 		}
4271 #endif
4272 #ifdef INET
4273 	case AF_INET:
4274 		{
4275 			struct sockaddr_in *sin;
4276 			unsigned char *p;
4277 
4278 			sin = (struct sockaddr_in *)sa;
4279 			p = (unsigned char *)&sin->sin_addr;
4280 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4281 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4282 			break;
4283 		}
4284 #endif
4285 	default:
4286 		SCTP_PRINTF("?\n");
4287 		break;
4288 	}
4289 }
4290 
/*
 * Move every read-queue entry belonging to stcb from old_inp's socket to
 * new_inp's socket (the peeloff/accept case -- see the sblock comment
 * below).  Each moved mbuf is debited from the old socket's receive
 * buffer accounting and credited to the new one.  waitflags is passed to
 * sblock(); if sblock() fails the data is left untouched on the old
 * socket.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* Debit the old socket's receive-buffer accounting. */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* Credit the new socket's receive-buffer accounting. */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4366 
/*
 * Queue the completed entry "control" at the tail of inp's read queue
 * and account its mbuf chain in socket buffer sb so select()/read()
 * see the data.  Zero-length mbufs are stripped from the chain first;
 * if nothing remains the entry is freed instead of queued.  "end" marks
 * the message complete.  The inp read lock is taken here unless
 * inp_read_lock_held says the caller already owns it.  After queueing,
 * the reader is woken (or the zero-copy event is posted).
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Reader is gone: drop the message entirely. */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* Notifications don't count as received user messages. */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/*
	 * Strip zero-length mbufs from the chain and account the rest in
	 * the socket buffer, recomputing control->length as we go.
	 */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip (and free) mbufs with no length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* Wake the reader (or post the zero-copy event). */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * Lock-order dance: hold a reference, drop the TCB
			 * lock, take the socket lock, re-take the TCB lock.
			 */
			if (!so_locked) {
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* socket vanished while we juggled locks */
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4492 
4493 
/*
 * Append mbuf chain m to the existing read-queue entry "control" (used
 * while a partial-delivery API event is underway or when appending on
 * the reassembly queue).  Zero-length mbufs are freed on the fly; when
 * sb is non-NULL each remaining mbuf is also accounted in the socket
 * buffer.  ctls_cumack becomes the entry's sinfo_tsn/sinfo_cumtsn (see
 * the comment near the assignment).  "end" marks the message complete.
 * Returns 0 on success, -1 when there is nothing usable to append to.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		/* common error exit: unlock (if locked) and fail */
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* Reader is gone; silently drop. */
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/*
	 * Strip zero-length mbufs, total the remaining lengths, and (in
	 * the PD-API case) account each mbuf in the socket buffer.
	 */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip (and free) mbufs with no length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* Wake the reader (or post the zero-copy event). */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * Lock-order dance: hold a reference, drop the TCB
			 * lock, take the socket lock, re-take the TCB lock.
			 */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while we juggled locks */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4643 
4644 
4645 
4646 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4647  *************ALTERNATE ROUTING CODE
4648  */
4649 
4650 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4651  *************ALTERNATE ROUTING CODE
4652  */
4653 
4654 struct mbuf *
4655 sctp_generate_cause(uint16_t code, char *info)
4656 {
4657 	struct mbuf *m;
4658 	struct sctp_gen_error_cause *cause;
4659 	size_t info_len, len;
4660 
4661 	if ((code == 0) || (info == NULL)) {
4662 		return (NULL);
4663 	}
4664 	info_len = strlen(info);
4665 	len = sizeof(struct sctp_paramhdr) + info_len;
4666 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4667 	if (m != NULL) {
4668 		SCTP_BUF_LEN(m) = len;
4669 		cause = mtod(m, struct sctp_gen_error_cause *);
4670 		cause->code = htons(code);
4671 		cause->length = htons((uint16_t) len);
4672 		memcpy(cause->info, info, info_len);
4673 	}
4674 	return (m);
4675 }
4676 
4677 struct mbuf *
4678 sctp_generate_no_user_data_cause(uint32_t tsn)
4679 {
4680 	struct mbuf *m;
4681 	struct sctp_error_no_user_data *no_user_data_cause;
4682 	size_t len;
4683 
4684 	len = sizeof(struct sctp_error_no_user_data);
4685 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4686 	if (m != NULL) {
4687 		SCTP_BUF_LEN(m) = len;
4688 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4689 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4690 		no_user_data_cause->cause.length = htons((uint16_t) len);
4691 		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4692 	}
4693 	return (m);
4694 }
4695 
4696 #ifdef SCTP_MBCNT_LOGGING
/*
 * Debit the association's output-queue accounting for chunk tp1 (the
 * SCTP_MBCNT_LOGGING variant, which additionally logs the decrease):
 * chunks_on_out_queue, total_output_queue_size, and -- for 1-to-1 style
 * endpoints -- the socket's send-buffer byte count.  Both subtractions
 * clamp at zero rather than underflow.  No-op if the chunk has no data.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* Clamp at zero instead of wrapping the unsigned counter. */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* 1-to-1 style sockets also track the bytes in so_snd. */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4728 
4729 #endif
4730 
4731 int
4732 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4733     uint8_t sent, int so_locked
4734 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4735     SCTP_UNUSED
4736 #endif
4737 )
4738 {
4739 	struct sctp_stream_out *strq;
4740 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4741 	struct sctp_stream_queue_pending *sp;
4742 	uint16_t stream = 0, seq = 0;
4743 	uint8_t foundeom = 0;
4744 	int ret_sz = 0;
4745 	int notdone;
4746 	int do_wakeup_routine = 0;
4747 
4748 	stream = tp1->rec.data.stream_number;
4749 	seq = tp1->rec.data.stream_seq;
4750 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4751 		stcb->asoc.abandoned_sent[0]++;
4752 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4753 		stcb->asoc.strmout[stream].abandoned_sent[0]++;
4754 #if defined(SCTP_DETAILED_STR_STATS)
4755 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4756 #endif
4757 	} else {
4758 		stcb->asoc.abandoned_unsent[0]++;
4759 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4760 		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
4761 #if defined(SCTP_DETAILED_STR_STATS)
4762 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4763 #endif
4764 	}
4765 	do {
4766 		ret_sz += tp1->book_size;
4767 		if (tp1->data != NULL) {
4768 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4769 				sctp_flight_size_decrease(tp1);
4770 				sctp_total_flight_decrease(stcb, tp1);
4771 			}
4772 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4773 			stcb->asoc.peers_rwnd += tp1->send_size;
4774 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4775 			if (sent) {
4776 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4777 			} else {
4778 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4779 			}
4780 			if (tp1->data) {
4781 				sctp_m_freem(tp1->data);
4782 				tp1->data = NULL;
4783 			}
4784 			do_wakeup_routine = 1;
4785 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4786 				stcb->asoc.sent_queue_cnt_removeable--;
4787 			}
4788 		}
4789 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4790 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4791 		    SCTP_DATA_NOT_FRAG) {
4792 			/* not frag'ed we ae done   */
4793 			notdone = 0;
4794 			foundeom = 1;
4795 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4796 			/* end of frag, we are done */
4797 			notdone = 0;
4798 			foundeom = 1;
4799 		} else {
4800 			/*
4801 			 * Its a begin or middle piece, we must mark all of
4802 			 * it
4803 			 */
4804 			notdone = 1;
4805 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4806 		}
4807 	} while (tp1 && notdone);
4808 	if (foundeom == 0) {
4809 		/*
4810 		 * The multi-part message was scattered across the send and
4811 		 * sent queue.
4812 		 */
4813 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4814 			if ((tp1->rec.data.stream_number != stream) ||
4815 			    (tp1->rec.data.stream_seq != seq)) {
4816 				break;
4817 			}
4818 			/*
4819 			 * save to chk in case we have some on stream out
4820 			 * queue. If so and we have an un-transmitted one we
4821 			 * don't have to fudge the TSN.
4822 			 */
4823 			chk = tp1;
4824 			ret_sz += tp1->book_size;
4825 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4826 			if (sent) {
4827 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4828 			} else {
4829 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4830 			}
4831 			if (tp1->data) {
4832 				sctp_m_freem(tp1->data);
4833 				tp1->data = NULL;
4834 			}
4835 			/* No flight involved here book the size to 0 */
4836 			tp1->book_size = 0;
4837 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4838 				foundeom = 1;
4839 			}
4840 			do_wakeup_routine = 1;
4841 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4842 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4843 			/*
4844 			 * on to the sent queue so we can wait for it to be
4845 			 * passed by.
4846 			 */
4847 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4848 			    sctp_next);
4849 			stcb->asoc.send_queue_cnt--;
4850 			stcb->asoc.sent_queue_cnt++;
4851 		}
4852 	}
4853 	if (foundeom == 0) {
4854 		/*
4855 		 * Still no eom found. That means there is stuff left on the
4856 		 * stream out queue.. yuck.
4857 		 */
4858 		SCTP_TCB_SEND_LOCK(stcb);
4859 		strq = &stcb->asoc.strmout[stream];
4860 		sp = TAILQ_FIRST(&strq->outqueue);
4861 		if (sp != NULL) {
4862 			sp->discard_rest = 1;
4863 			/*
4864 			 * We may need to put a chunk on the queue that
4865 			 * holds the TSN that would have been sent with the
4866 			 * LAST bit.
4867 			 */
4868 			if (chk == NULL) {
4869 				/* Yep, we have to */
4870 				sctp_alloc_a_chunk(stcb, chk);
4871 				if (chk == NULL) {
4872 					/*
4873 					 * we are hosed. All we can do is
4874 					 * nothing.. which will cause an
4875 					 * abort if the peer is paying
4876 					 * attention.
4877 					 */
4878 					goto oh_well;
4879 				}
4880 				memset(chk, 0, sizeof(*chk));
4881 				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4882 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4883 				chk->asoc = &stcb->asoc;
4884 				chk->rec.data.stream_seq = strq->next_sequence_send;
4885 				chk->rec.data.stream_number = sp->stream;
4886 				chk->rec.data.payloadtype = sp->ppid;
4887 				chk->rec.data.context = sp->context;
4888 				chk->flags = sp->act_flags;
4889 				if (sp->net)
4890 					chk->whoTo = sp->net;
4891 				else
4892 					chk->whoTo = stcb->asoc.primary_destination;
4893 				atomic_add_int(&chk->whoTo->ref_count, 1);
4894 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4895 				stcb->asoc.pr_sctp_cnt++;
4896 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4897 				stcb->asoc.sent_queue_cnt++;
4898 				stcb->asoc.pr_sctp_cnt++;
4899 			} else {
4900 				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4901 			}
4902 			strq->next_sequence_send++;
4903 	oh_well:
4904 			if (sp->data) {
4905 				/*
4906 				 * Pull any data to free up the SB and allow
4907 				 * sender to "add more" while we will throw
4908 				 * away :-)
4909 				 */
4910 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4911 				ret_sz += sp->length;
4912 				do_wakeup_routine = 1;
4913 				sp->some_taken = 1;
4914 				sctp_m_freem(sp->data);
4915 				sp->data = NULL;
4916 				sp->tail_mbuf = NULL;
4917 				sp->length = 0;
4918 			}
4919 		}
4920 		SCTP_TCB_SEND_UNLOCK(stcb);
4921 	}
4922 	if (do_wakeup_routine) {
4923 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4924 		struct socket *so;
4925 
4926 		so = SCTP_INP_SO(stcb->sctp_ep);
4927 		if (!so_locked) {
4928 			atomic_add_int(&stcb->asoc.refcnt, 1);
4929 			SCTP_TCB_UNLOCK(stcb);
4930 			SCTP_SOCKET_LOCK(so, 1);
4931 			SCTP_TCB_LOCK(stcb);
4932 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4933 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4934 				/* assoc was freed while we were unlocked */
4935 				SCTP_SOCKET_UNLOCK(so, 1);
4936 				return (ret_sz);
4937 			}
4938 		}
4939 #endif
4940 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4941 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4942 		if (!so_locked) {
4943 			SCTP_SOCKET_UNLOCK(so, 1);
4944 		}
4945 #endif
4946 	}
4947 	return (ret_sz);
4948 }
4949 
4950 /*
4951  * checks to see if the given address, sa, is one that is currently known by
4952  * the kernel note: can't distinguish the same address on multiple interfaces
4953  * and doesn't handle multiple addresses with different zone/scope id's note:
4954  * ifa_ifwithaddr() compares the entire sockaddr struct
4955  */
4956 struct sctp_ifa *
4957 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4958     int holds_lock)
4959 {
4960 	struct sctp_laddr *laddr;
4961 
4962 	if (holds_lock == 0) {
4963 		SCTP_INP_RLOCK(inp);
4964 	}
4965 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4966 		if (laddr->ifa == NULL)
4967 			continue;
4968 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4969 			continue;
4970 #ifdef INET
4971 		if (addr->sa_family == AF_INET) {
4972 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4973 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4974 				/* found him. */
4975 				if (holds_lock == 0) {
4976 					SCTP_INP_RUNLOCK(inp);
4977 				}
4978 				return (laddr->ifa);
4979 				break;
4980 			}
4981 		}
4982 #endif
4983 #ifdef INET6
4984 		if (addr->sa_family == AF_INET6) {
4985 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4986 			    &laddr->ifa->address.sin6)) {
4987 				/* found him. */
4988 				if (holds_lock == 0) {
4989 					SCTP_INP_RUNLOCK(inp);
4990 				}
4991 				return (laddr->ifa);
4992 				break;
4993 			}
4994 		}
4995 #endif
4996 	}
4997 	if (holds_lock == 0) {
4998 		SCTP_INP_RUNLOCK(inp);
4999 	}
5000 	return (NULL);
5001 }
5002 
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	/*
	 * Derive a 32-bit hash value from a socket address for bucket
	 * selection in the VRF address hash.  Address families that are
	 * not compiled in (or are unknown) hash to 0.
	 */
	uint32_t hash;

	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)addr;
			/* Fold the upper half of the IPv4 address into the lower. */
			hash = sin->sin_addr.s_addr;
			return (hash ^ (hash >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)addr;
			/* Sum the four 32-bit words, then fold as above. */
			hash = sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3];
			return (hash ^ (hash >> 16));
		}
#endif
	default:
		return (0);
	}
}
5036 
5037 struct sctp_ifa *
5038 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5039 {
5040 	struct sctp_ifa *sctp_ifap;
5041 	struct sctp_vrf *vrf;
5042 	struct sctp_ifalist *hash_head;
5043 	uint32_t hash_of_addr;
5044 
5045 	if (holds_lock == 0)
5046 		SCTP_IPI_ADDR_RLOCK();
5047 
5048 	vrf = sctp_find_vrf(vrf_id);
5049 	if (vrf == NULL) {
5050 		if (holds_lock == 0)
5051 			SCTP_IPI_ADDR_RUNLOCK();
5052 		return (NULL);
5053 	}
5054 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5055 
5056 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5057 	if (hash_head == NULL) {
5058 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5059 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5060 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5061 		sctp_print_address(addr);
5062 		SCTP_PRINTF("No such bucket for address\n");
5063 		if (holds_lock == 0)
5064 			SCTP_IPI_ADDR_RUNLOCK();
5065 
5066 		return (NULL);
5067 	}
5068 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5069 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5070 			continue;
5071 #ifdef INET
5072 		if (addr->sa_family == AF_INET) {
5073 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5074 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5075 				/* found him. */
5076 				if (holds_lock == 0)
5077 					SCTP_IPI_ADDR_RUNLOCK();
5078 				return (sctp_ifap);
5079 				break;
5080 			}
5081 		}
5082 #endif
5083 #ifdef INET6
5084 		if (addr->sa_family == AF_INET6) {
5085 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5086 			    &sctp_ifap->address.sin6)) {
5087 				/* found him. */
5088 				if (holds_lock == 0)
5089 					SCTP_IPI_ADDR_RUNLOCK();
5090 				return (sctp_ifap);
5091 				break;
5092 			}
5093 		}
5094 #endif
5095 	}
5096 	if (holds_lock == 0)
5097 		SCTP_IPI_ADDR_RUNLOCK();
5098 	return (NULL);
5099 }
5100 
/*
 * Called after the user has consumed *freed_so_far bytes from the socket
 * receive buffer.  If the receive window has opened by at least rwnd_req
 * bytes compared to the last window value reported to the peer, send an
 * immediate window-update SACK (and kick the output path); otherwise just
 * record the pending amount in stcb->freed_by_sorcv_sincelast.
 *
 * hold_rlock is non-zero when the caller owns the INP read lock; that lock
 * is dropped around the SACK/output path and re-acquired before returning.
 * *freed_so_far is reset to 0 once its value has been accumulated.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the TCB so it cannot be freed while we work on it. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Pin the endpoint as well; dropped at "out". */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Accumulate what the caller freed since the last report. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		/* Window shrank; nothing worth reporting. */
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * Window opened enough to warrant a SACK.  Drop the INP
		 * read lock (if held) before taking the TCB lock to keep
		 * the lock ordering sane.
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock: the assoc may be dying. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-acquire the INP read lock if we dropped it for the caller. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Release the TCB pin taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5182 
5183 int
5184 sctp_sorecvmsg(struct socket *so,
5185     struct uio *uio,
5186     struct mbuf **mp,
5187     struct sockaddr *from,
5188     int fromlen,
5189     int *msg_flags,
5190     struct sctp_sndrcvinfo *sinfo,
5191     int filling_sinfo)
5192 {
5193 	/*
5194 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5195 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5196 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5197 	 * On the way out we may send out any combination of:
5198 	 * MSG_NOTIFICATION MSG_EOR
5199 	 *
5200 	 */
5201 	struct sctp_inpcb *inp = NULL;
5202 	int my_len = 0;
5203 	int cp_len = 0, error = 0;
5204 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5205 	struct mbuf *m = NULL;
5206 	struct sctp_tcb *stcb = NULL;
5207 	int wakeup_read_socket = 0;
5208 	int freecnt_applied = 0;
5209 	int out_flags = 0, in_flags = 0;
5210 	int block_allowed = 1;
5211 	uint32_t freed_so_far = 0;
5212 	uint32_t copied_so_far = 0;
5213 	int in_eeor_mode = 0;
5214 	int no_rcv_needed = 0;
5215 	uint32_t rwnd_req = 0;
5216 	int hold_sblock = 0;
5217 	int hold_rlock = 0;
5218 	int slen = 0;
5219 	uint32_t held_length = 0;
5220 	int sockbuf_lock = 0;
5221 
5222 	if (uio == NULL) {
5223 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5224 		return (EINVAL);
5225 	}
5226 	if (msg_flags) {
5227 		in_flags = *msg_flags;
5228 		if (in_flags & MSG_PEEK)
5229 			SCTP_STAT_INCR(sctps_read_peeks);
5230 	} else {
5231 		in_flags = 0;
5232 	}
5233 	slen = uio->uio_resid;
5234 
5235 	/* Pull in and set up our int flags */
5236 	if (in_flags & MSG_OOB) {
5237 		/* Out of band's NOT supported */
5238 		return (EOPNOTSUPP);
5239 	}
5240 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5241 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5242 		return (EINVAL);
5243 	}
5244 	if ((in_flags & (MSG_DONTWAIT
5245 	    | MSG_NBIO
5246 	    )) ||
5247 	    SCTP_SO_IS_NBIO(so)) {
5248 		block_allowed = 0;
5249 	}
5250 	/* setup the endpoint */
5251 	inp = (struct sctp_inpcb *)so->so_pcb;
5252 	if (inp == NULL) {
5253 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5254 		return (EFAULT);
5255 	}
5256 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5257 	/* Must be at least a MTU's worth */
5258 	if (rwnd_req < SCTP_MIN_RWND)
5259 		rwnd_req = SCTP_MIN_RWND;
5260 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5261 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5262 		sctp_misc_ints(SCTP_SORECV_ENTER,
5263 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5264 	}
5265 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5266 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5267 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5268 	}
5269 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5270 	if (error) {
5271 		goto release_unlocked;
5272 	}
5273 	sockbuf_lock = 1;
5274 restart:
5275 
5276 
5277 restart_nosblocks:
5278 	if (hold_sblock == 0) {
5279 		SOCKBUF_LOCK(&so->so_rcv);
5280 		hold_sblock = 1;
5281 	}
5282 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5283 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5284 		goto out;
5285 	}
5286 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5287 		if (so->so_error) {
5288 			error = so->so_error;
5289 			if ((in_flags & MSG_PEEK) == 0)
5290 				so->so_error = 0;
5291 			goto out;
5292 		} else {
5293 			if (so->so_rcv.sb_cc == 0) {
5294 				/* indicate EOF */
5295 				error = 0;
5296 				goto out;
5297 			}
5298 		}
5299 	}
5300 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5301 		/* we need to wait for data */
5302 		if ((so->so_rcv.sb_cc == 0) &&
5303 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5304 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5305 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5306 				/*
5307 				 * For active open side clear flags for
5308 				 * re-use passive open is blocked by
5309 				 * connect.
5310 				 */
5311 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5312 					/*
5313 					 * You were aborted, passive side
5314 					 * always hits here
5315 					 */
5316 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5317 					error = ECONNRESET;
5318 				}
5319 				so->so_state &= ~(SS_ISCONNECTING |
5320 				    SS_ISDISCONNECTING |
5321 				    SS_ISCONFIRMING |
5322 				    SS_ISCONNECTED);
5323 				if (error == 0) {
5324 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5325 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5326 						error = ENOTCONN;
5327 					}
5328 				}
5329 				goto out;
5330 			}
5331 		}
5332 		error = sbwait(&so->so_rcv);
5333 		if (error) {
5334 			goto out;
5335 		}
5336 		held_length = 0;
5337 		goto restart_nosblocks;
5338 	} else if (so->so_rcv.sb_cc == 0) {
5339 		if (so->so_error) {
5340 			error = so->so_error;
5341 			if ((in_flags & MSG_PEEK) == 0)
5342 				so->so_error = 0;
5343 		} else {
5344 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5345 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5346 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5347 					/*
5348 					 * For active open side clear flags
5349 					 * for re-use passive open is
5350 					 * blocked by connect.
5351 					 */
5352 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5353 						/*
5354 						 * You were aborted, passive
5355 						 * side always hits here
5356 						 */
5357 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5358 						error = ECONNRESET;
5359 					}
5360 					so->so_state &= ~(SS_ISCONNECTING |
5361 					    SS_ISDISCONNECTING |
5362 					    SS_ISCONFIRMING |
5363 					    SS_ISCONNECTED);
5364 					if (error == 0) {
5365 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5366 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5367 							error = ENOTCONN;
5368 						}
5369 					}
5370 					goto out;
5371 				}
5372 			}
5373 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5374 			error = EWOULDBLOCK;
5375 		}
5376 		goto out;
5377 	}
5378 	if (hold_sblock == 1) {
5379 		SOCKBUF_UNLOCK(&so->so_rcv);
5380 		hold_sblock = 0;
5381 	}
5382 	/* we possibly have data we can read */
5383 	/* sa_ignore FREED_MEMORY */
5384 	control = TAILQ_FIRST(&inp->read_queue);
5385 	if (control == NULL) {
5386 		/*
5387 		 * This could be happening since the appender did the
5388 		 * increment but as not yet did the tailq insert onto the
5389 		 * read_queue
5390 		 */
5391 		if (hold_rlock == 0) {
5392 			SCTP_INP_READ_LOCK(inp);
5393 		}
5394 		control = TAILQ_FIRST(&inp->read_queue);
5395 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5396 #ifdef INVARIANTS
5397 			panic("Huh, its non zero and nothing on control?");
5398 #endif
5399 			so->so_rcv.sb_cc = 0;
5400 		}
5401 		SCTP_INP_READ_UNLOCK(inp);
5402 		hold_rlock = 0;
5403 		goto restart;
5404 	}
5405 	if ((control->length == 0) &&
5406 	    (control->do_not_ref_stcb)) {
5407 		/*
5408 		 * Clean up code for freeing assoc that left behind a
5409 		 * pdapi.. maybe a peer in EEOR that just closed after
5410 		 * sending and never indicated a EOR.
5411 		 */
5412 		if (hold_rlock == 0) {
5413 			hold_rlock = 1;
5414 			SCTP_INP_READ_LOCK(inp);
5415 		}
5416 		control->held_length = 0;
5417 		if (control->data) {
5418 			/* Hmm there is data here .. fix */
5419 			struct mbuf *m_tmp;
5420 			int cnt = 0;
5421 
5422 			m_tmp = control->data;
5423 			while (m_tmp) {
5424 				cnt += SCTP_BUF_LEN(m_tmp);
5425 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5426 					control->tail_mbuf = m_tmp;
5427 					control->end_added = 1;
5428 				}
5429 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5430 			}
5431 			control->length = cnt;
5432 		} else {
5433 			/* remove it */
5434 			TAILQ_REMOVE(&inp->read_queue, control, next);
5435 			/* Add back any hiddend data */
5436 			sctp_free_remote_addr(control->whoFrom);
5437 			sctp_free_a_readq(stcb, control);
5438 		}
5439 		if (hold_rlock) {
5440 			hold_rlock = 0;
5441 			SCTP_INP_READ_UNLOCK(inp);
5442 		}
5443 		goto restart;
5444 	}
5445 	if ((control->length == 0) &&
5446 	    (control->end_added == 1)) {
5447 		/*
5448 		 * Do we also need to check for (control->pdapi_aborted ==
5449 		 * 1)?
5450 		 */
5451 		if (hold_rlock == 0) {
5452 			hold_rlock = 1;
5453 			SCTP_INP_READ_LOCK(inp);
5454 		}
5455 		TAILQ_REMOVE(&inp->read_queue, control, next);
5456 		if (control->data) {
5457 #ifdef INVARIANTS
5458 			panic("control->data not null but control->length == 0");
5459 #else
5460 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5461 			sctp_m_freem(control->data);
5462 			control->data = NULL;
5463 #endif
5464 		}
5465 		if (control->aux_data) {
5466 			sctp_m_free(control->aux_data);
5467 			control->aux_data = NULL;
5468 		}
5469 		sctp_free_remote_addr(control->whoFrom);
5470 		sctp_free_a_readq(stcb, control);
5471 		if (hold_rlock) {
5472 			hold_rlock = 0;
5473 			SCTP_INP_READ_UNLOCK(inp);
5474 		}
5475 		goto restart;
5476 	}
5477 	if (control->length == 0) {
5478 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5479 		    (filling_sinfo)) {
5480 			/* find a more suitable one then this */
5481 			ctl = TAILQ_NEXT(control, next);
5482 			while (ctl) {
5483 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5484 				    (ctl->some_taken ||
5485 				    (ctl->spec_flags & M_NOTIFICATION) ||
5486 				    ((ctl->do_not_ref_stcb == 0) &&
5487 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5488 				    ) {
5489 					/*-
5490 					 * If we have a different TCB next, and there is data
5491 					 * present. If we have already taken some (pdapi), OR we can
5492 					 * ref the tcb and no delivery as started on this stream, we
5493 					 * take it. Note we allow a notification on a different
5494 					 * assoc to be delivered..
5495 					 */
5496 					control = ctl;
5497 					goto found_one;
5498 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5499 					    (ctl->length) &&
5500 					    ((ctl->some_taken) ||
5501 					    ((ctl->do_not_ref_stcb == 0) &&
5502 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5503 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5504 					/*-
5505 					 * If we have the same tcb, and there is data present, and we
5506 					 * have the strm interleave feature present. Then if we have
5507 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5508 					 * not started a delivery for this stream, we can take it.
5509 					 * Note we do NOT allow a notificaiton on the same assoc to
5510 					 * be delivered.
5511 					 */
5512 					control = ctl;
5513 					goto found_one;
5514 				}
5515 				ctl = TAILQ_NEXT(ctl, next);
5516 			}
5517 		}
5518 		/*
5519 		 * if we reach here, not suitable replacement is available
5520 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5521 		 * into the our held count, and its time to sleep again.
5522 		 */
5523 		held_length = so->so_rcv.sb_cc;
5524 		control->held_length = so->so_rcv.sb_cc;
5525 		goto restart;
5526 	}
5527 	/* Clear the held length since there is something to read */
5528 	control->held_length = 0;
5529 	if (hold_rlock) {
5530 		SCTP_INP_READ_UNLOCK(inp);
5531 		hold_rlock = 0;
5532 	}
5533 found_one:
5534 	/*
5535 	 * If we reach here, control has a some data for us to read off.
5536 	 * Note that stcb COULD be NULL.
5537 	 */
5538 	control->some_taken++;
5539 	if (hold_sblock) {
5540 		SOCKBUF_UNLOCK(&so->so_rcv);
5541 		hold_sblock = 0;
5542 	}
5543 	stcb = control->stcb;
5544 	if (stcb) {
5545 		if ((control->do_not_ref_stcb == 0) &&
5546 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5547 			if (freecnt_applied == 0)
5548 				stcb = NULL;
5549 		} else if (control->do_not_ref_stcb == 0) {
5550 			/* you can't free it on me please */
5551 			/*
5552 			 * The lock on the socket buffer protects us so the
5553 			 * free code will stop. But since we used the
5554 			 * socketbuf lock and the sender uses the tcb_lock
5555 			 * to increment, we need to use the atomic add to
5556 			 * the refcnt
5557 			 */
5558 			if (freecnt_applied) {
5559 #ifdef INVARIANTS
5560 				panic("refcnt already incremented");
5561 #else
5562 				SCTP_PRINTF("refcnt already incremented?\n");
5563 #endif
5564 			} else {
5565 				atomic_add_int(&stcb->asoc.refcnt, 1);
5566 				freecnt_applied = 1;
5567 			}
5568 			/*
5569 			 * Setup to remember how much we have not yet told
5570 			 * the peer our rwnd has opened up. Note we grab the
5571 			 * value from the tcb from last time. Note too that
5572 			 * sack sending clears this when a sack is sent,
5573 			 * which is fine. Once we hit the rwnd_req, we then
5574 			 * will go to the sctp_user_rcvd() that will not
5575 			 * lock until it KNOWs it MUST send a WUP-SACK.
5576 			 */
5577 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5578 			stcb->freed_by_sorcv_sincelast = 0;
5579 		}
5580 	}
5581 	if (stcb &&
5582 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5583 	    control->do_not_ref_stcb == 0) {
5584 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5585 	}
5586 	/* First lets get off the sinfo and sockaddr info */
5587 	if ((sinfo) && filling_sinfo) {
5588 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5589 		nxt = TAILQ_NEXT(control, next);
5590 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5591 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5592 			struct sctp_extrcvinfo *s_extra;
5593 
5594 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5595 			if ((nxt) &&
5596 			    (nxt->length)) {
5597 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5598 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5599 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5600 				}
5601 				if (nxt->spec_flags & M_NOTIFICATION) {
5602 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5603 				}
5604 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5605 				s_extra->sreinfo_next_length = nxt->length;
5606 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5607 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5608 				if (nxt->tail_mbuf != NULL) {
5609 					if (nxt->end_added) {
5610 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5611 					}
5612 				}
5613 			} else {
5614 				/*
5615 				 * we explicitly 0 this, since the memcpy
5616 				 * got some other things beyond the older
5617 				 * sinfo_ that is on the control's structure
5618 				 * :-D
5619 				 */
5620 				nxt = NULL;
5621 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5622 				s_extra->sreinfo_next_aid = 0;
5623 				s_extra->sreinfo_next_length = 0;
5624 				s_extra->sreinfo_next_ppid = 0;
5625 				s_extra->sreinfo_next_stream = 0;
5626 			}
5627 		}
5628 		/*
5629 		 * update off the real current cum-ack, if we have an stcb.
5630 		 */
5631 		if ((control->do_not_ref_stcb == 0) && stcb)
5632 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5633 		/*
5634 		 * mask off the high bits, we keep the actual chunk bits in
5635 		 * there.
5636 		 */
5637 		sinfo->sinfo_flags &= 0x00ff;
5638 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5639 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5640 		}
5641 	}
5642 #ifdef SCTP_ASOCLOG_OF_TSNS
5643 	{
5644 		int index, newindex;
5645 		struct sctp_pcbtsn_rlog *entry;
5646 
5647 		do {
5648 			index = inp->readlog_index;
5649 			newindex = index + 1;
5650 			if (newindex >= SCTP_READ_LOG_SIZE) {
5651 				newindex = 0;
5652 			}
5653 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5654 		entry = &inp->readlog[index];
5655 		entry->vtag = control->sinfo_assoc_id;
5656 		entry->strm = control->sinfo_stream;
5657 		entry->seq = control->sinfo_ssn;
5658 		entry->sz = control->length;
5659 		entry->flgs = control->sinfo_flags;
5660 	}
5661 #endif
5662 	if ((fromlen > 0) && (from != NULL)) {
5663 		union sctp_sockstore store;
5664 		size_t len;
5665 
5666 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5667 #ifdef INET6
5668 		case AF_INET6:
5669 			len = sizeof(struct sockaddr_in6);
5670 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5671 			store.sin6.sin6_port = control->port_from;
5672 			break;
5673 #endif
5674 #ifdef INET
5675 		case AF_INET:
5676 #ifdef INET6
5677 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5678 				len = sizeof(struct sockaddr_in6);
5679 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5680 				    &store.sin6);
5681 				store.sin6.sin6_port = control->port_from;
5682 			} else {
5683 				len = sizeof(struct sockaddr_in);
5684 				store.sin = control->whoFrom->ro._l_addr.sin;
5685 				store.sin.sin_port = control->port_from;
5686 			}
5687 #else
5688 			len = sizeof(struct sockaddr_in);
5689 			store.sin = control->whoFrom->ro._l_addr.sin;
5690 			store.sin.sin_port = control->port_from;
5691 #endif
5692 			break;
5693 #endif
5694 		default:
5695 			len = 0;
5696 			break;
5697 		}
5698 		memcpy(from, &store, min((size_t)fromlen, len));
5699 #ifdef INET6
5700 		{
5701 			struct sockaddr_in6 lsa6, *from6;
5702 
5703 			from6 = (struct sockaddr_in6 *)from;
5704 			sctp_recover_scope_mac(from6, (&lsa6));
5705 		}
5706 #endif
5707 	}
5708 	/* now copy out what data we can */
5709 	if (mp == NULL) {
5710 		/* copy out each mbuf in the chain up to length */
5711 get_more_data:
5712 		m = control->data;
5713 		while (m) {
5714 			/* Move out all we can */
5715 			cp_len = (int)uio->uio_resid;
5716 			my_len = (int)SCTP_BUF_LEN(m);
5717 			if (cp_len > my_len) {
5718 				/* not enough in this buf */
5719 				cp_len = my_len;
5720 			}
5721 			if (hold_rlock) {
5722 				SCTP_INP_READ_UNLOCK(inp);
5723 				hold_rlock = 0;
5724 			}
5725 			if (cp_len > 0)
5726 				error = uiomove(mtod(m, char *), cp_len, uio);
5727 			/* re-read */
5728 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5729 				goto release;
5730 			}
5731 			if ((control->do_not_ref_stcb == 0) && stcb &&
5732 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5733 				no_rcv_needed = 1;
5734 			}
5735 			if (error) {
5736 				/* error we are out of here */
5737 				goto release;
5738 			}
5739 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5740 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5741 			    ((control->end_added == 0) ||
5742 			    (control->end_added &&
5743 			    (TAILQ_NEXT(control, next) == NULL)))
5744 			    ) {
5745 				SCTP_INP_READ_LOCK(inp);
5746 				hold_rlock = 1;
5747 			}
5748 			if (cp_len == SCTP_BUF_LEN(m)) {
5749 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5750 				    (control->end_added)) {
5751 					out_flags |= MSG_EOR;
5752 					if ((control->do_not_ref_stcb == 0) &&
5753 					    (control->stcb != NULL) &&
5754 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5755 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5756 				}
5757 				if (control->spec_flags & M_NOTIFICATION) {
5758 					out_flags |= MSG_NOTIFICATION;
5759 				}
5760 				/* we ate up the mbuf */
5761 				if (in_flags & MSG_PEEK) {
5762 					/* just looking */
5763 					m = SCTP_BUF_NEXT(m);
5764 					copied_so_far += cp_len;
5765 				} else {
5766 					/* dispose of the mbuf */
5767 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5768 						sctp_sblog(&so->so_rcv,
5769 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5770 					}
5771 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5772 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5773 						sctp_sblog(&so->so_rcv,
5774 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5775 					}
5776 					copied_so_far += cp_len;
5777 					freed_so_far += cp_len;
5778 					freed_so_far += MSIZE;
5779 					atomic_subtract_int(&control->length, cp_len);
5780 					control->data = sctp_m_free(m);
5781 					m = control->data;
5782 					/*
5783 					 * been through it all, must hold sb
5784 					 * lock ok to null tail
5785 					 */
5786 					if (control->data == NULL) {
5787 #ifdef INVARIANTS
5788 						if ((control->end_added == 0) ||
5789 						    (TAILQ_NEXT(control, next) == NULL)) {
5790 							/*
5791 							 * If the end is not
5792 							 * added, OR the
5793 							 * next is NOT null
5794 							 * we MUST have the
5795 							 * lock.
5796 							 */
5797 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5798 								panic("Hmm we don't own the lock?");
5799 							}
5800 						}
5801 #endif
5802 						control->tail_mbuf = NULL;
5803 #ifdef INVARIANTS
5804 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5805 							panic("end_added, nothing left and no MSG_EOR");
5806 						}
5807 #endif
5808 					}
5809 				}
5810 			} else {
5811 				/* Do we need to trim the mbuf? */
5812 				if (control->spec_flags & M_NOTIFICATION) {
5813 					out_flags |= MSG_NOTIFICATION;
5814 				}
5815 				if ((in_flags & MSG_PEEK) == 0) {
5816 					SCTP_BUF_RESV_UF(m, cp_len);
5817 					SCTP_BUF_LEN(m) -= cp_len;
5818 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5819 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5820 					}
5821 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5822 					if ((control->do_not_ref_stcb == 0) &&
5823 					    stcb) {
5824 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5825 					}
5826 					copied_so_far += cp_len;
5827 					freed_so_far += cp_len;
5828 					freed_so_far += MSIZE;
5829 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5830 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5831 						    SCTP_LOG_SBRESULT, 0);
5832 					}
5833 					atomic_subtract_int(&control->length, cp_len);
5834 				} else {
5835 					copied_so_far += cp_len;
5836 				}
5837 			}
5838 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5839 				break;
5840 			}
5841 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5842 			    (control->do_not_ref_stcb == 0) &&
5843 			    (freed_so_far >= rwnd_req)) {
5844 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5845 			}
5846 		}		/* end while(m) */
5847 		/*
5848 		 * At this point we have looked at it all and we either have
5849 		 * a MSG_EOR/or read all the user wants... <OR>
5850 		 * control->length == 0.
5851 		 */
5852 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5853 			/* we are done with this control */
5854 			if (control->length == 0) {
5855 				if (control->data) {
5856 #ifdef INVARIANTS
5857 					panic("control->data not null at read eor?");
5858 #else
5859 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5860 					sctp_m_freem(control->data);
5861 					control->data = NULL;
5862 #endif
5863 				}
5864 		done_with_control:
5865 				if (TAILQ_NEXT(control, next) == NULL) {
5866 					/*
5867 					 * If we don't have a next we need a
5868 					 * lock, if there is a next
5869 					 * interrupt is filling ahead of us
5870 					 * and we don't need a lock to
5871 					 * remove this guy (which is the
5872 					 * head of the queue).
5873 					 */
5874 					if (hold_rlock == 0) {
5875 						SCTP_INP_READ_LOCK(inp);
5876 						hold_rlock = 1;
5877 					}
5878 				}
5879 				TAILQ_REMOVE(&inp->read_queue, control, next);
5880 				/* Add back any hiddend data */
5881 				if (control->held_length) {
5882 					held_length = 0;
5883 					control->held_length = 0;
5884 					wakeup_read_socket = 1;
5885 				}
5886 				if (control->aux_data) {
5887 					sctp_m_free(control->aux_data);
5888 					control->aux_data = NULL;
5889 				}
5890 				no_rcv_needed = control->do_not_ref_stcb;
5891 				sctp_free_remote_addr(control->whoFrom);
5892 				control->data = NULL;
5893 				sctp_free_a_readq(stcb, control);
5894 				control = NULL;
5895 				if ((freed_so_far >= rwnd_req) &&
5896 				    (no_rcv_needed == 0))
5897 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5898 
5899 			} else {
5900 				/*
5901 				 * The user did not read all of this
5902 				 * message, turn off the returned MSG_EOR
5903 				 * since we are leaving more behind on the
5904 				 * control to read.
5905 				 */
5906 #ifdef INVARIANTS
5907 				if (control->end_added &&
5908 				    (control->data == NULL) &&
5909 				    (control->tail_mbuf == NULL)) {
5910 					panic("Gak, control->length is corrupt?");
5911 				}
5912 #endif
5913 				no_rcv_needed = control->do_not_ref_stcb;
5914 				out_flags &= ~MSG_EOR;
5915 			}
5916 		}
5917 		if (out_flags & MSG_EOR) {
5918 			goto release;
5919 		}
5920 		if ((uio->uio_resid == 0) ||
5921 		    ((in_eeor_mode) &&
5922 		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5923 			goto release;
5924 		}
5925 		/*
5926 		 * If I hit here the receiver wants more and this message is
5927 		 * NOT done (pd-api). So two questions. Can we block? if not
5928 		 * we are done. Did the user NOT set MSG_WAITALL?
5929 		 */
5930 		if (block_allowed == 0) {
5931 			goto release;
5932 		}
5933 		/*
5934 		 * We need to wait for more data a few things: - We don't
5935 		 * sbunlock() so we don't get someone else reading. - We
5936 		 * must be sure to account for the case where what is added
5937 		 * is NOT to our control when we wakeup.
5938 		 */
5939 
5940 		/*
5941 		 * Do we need to tell the transport a rwnd update might be
5942 		 * needed before we go to sleep?
5943 		 */
5944 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5945 		    ((freed_so_far >= rwnd_req) &&
5946 		    (control->do_not_ref_stcb == 0) &&
5947 		    (no_rcv_needed == 0))) {
5948 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5949 		}
5950 wait_some_more:
5951 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5952 			goto release;
5953 		}
5954 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5955 			goto release;
5956 
5957 		if (hold_rlock == 1) {
5958 			SCTP_INP_READ_UNLOCK(inp);
5959 			hold_rlock = 0;
5960 		}
5961 		if (hold_sblock == 0) {
5962 			SOCKBUF_LOCK(&so->so_rcv);
5963 			hold_sblock = 1;
5964 		}
5965 		if ((copied_so_far) && (control->length == 0) &&
5966 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5967 			goto release;
5968 		}
5969 		if (so->so_rcv.sb_cc <= control->held_length) {
5970 			error = sbwait(&so->so_rcv);
5971 			if (error) {
5972 				goto release;
5973 			}
5974 			control->held_length = 0;
5975 		}
5976 		if (hold_sblock) {
5977 			SOCKBUF_UNLOCK(&so->so_rcv);
5978 			hold_sblock = 0;
5979 		}
5980 		if (control->length == 0) {
5981 			/* still nothing here */
5982 			if (control->end_added == 1) {
5983 				/* he aborted, or is done i.e.did a shutdown */
5984 				out_flags |= MSG_EOR;
5985 				if (control->pdapi_aborted) {
5986 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5987 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5988 
5989 					out_flags |= MSG_TRUNC;
5990 				} else {
5991 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5992 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5993 				}
5994 				goto done_with_control;
5995 			}
5996 			if (so->so_rcv.sb_cc > held_length) {
5997 				control->held_length = so->so_rcv.sb_cc;
5998 				held_length = 0;
5999 			}
6000 			goto wait_some_more;
6001 		} else if (control->data == NULL) {
6002 			/*
6003 			 * we must re-sync since data is probably being
6004 			 * added
6005 			 */
6006 			SCTP_INP_READ_LOCK(inp);
6007 			if ((control->length > 0) && (control->data == NULL)) {
6008 				/*
6009 				 * big trouble.. we have the lock and its
6010 				 * corrupt?
6011 				 */
6012 #ifdef INVARIANTS
6013 				panic("Impossible data==NULL length !=0");
6014 #endif
6015 				out_flags |= MSG_EOR;
6016 				out_flags |= MSG_TRUNC;
6017 				control->length = 0;
6018 				SCTP_INP_READ_UNLOCK(inp);
6019 				goto done_with_control;
6020 			}
6021 			SCTP_INP_READ_UNLOCK(inp);
6022 			/* We will fall around to get more data */
6023 		}
6024 		goto get_more_data;
6025 	} else {
6026 		/*-
6027 		 * Give caller back the mbuf chain,
6028 		 * store in uio_resid the length
6029 		 */
6030 		wakeup_read_socket = 0;
6031 		if ((control->end_added == 0) ||
6032 		    (TAILQ_NEXT(control, next) == NULL)) {
6033 			/* Need to get rlock */
6034 			if (hold_rlock == 0) {
6035 				SCTP_INP_READ_LOCK(inp);
6036 				hold_rlock = 1;
6037 			}
6038 		}
6039 		if (control->end_added) {
6040 			out_flags |= MSG_EOR;
6041 			if ((control->do_not_ref_stcb == 0) &&
6042 			    (control->stcb != NULL) &&
6043 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6044 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6045 		}
6046 		if (control->spec_flags & M_NOTIFICATION) {
6047 			out_flags |= MSG_NOTIFICATION;
6048 		}
6049 		uio->uio_resid = control->length;
6050 		*mp = control->data;
6051 		m = control->data;
6052 		while (m) {
6053 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6054 				sctp_sblog(&so->so_rcv,
6055 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6056 			}
6057 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6058 			freed_so_far += SCTP_BUF_LEN(m);
6059 			freed_so_far += MSIZE;
6060 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6061 				sctp_sblog(&so->so_rcv,
6062 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6063 			}
6064 			m = SCTP_BUF_NEXT(m);
6065 		}
6066 		control->data = control->tail_mbuf = NULL;
6067 		control->length = 0;
6068 		if (out_flags & MSG_EOR) {
6069 			/* Done with this control */
6070 			goto done_with_control;
6071 		}
6072 	}
6073 release:
6074 	if (hold_rlock == 1) {
6075 		SCTP_INP_READ_UNLOCK(inp);
6076 		hold_rlock = 0;
6077 	}
6078 	if (hold_sblock == 1) {
6079 		SOCKBUF_UNLOCK(&so->so_rcv);
6080 		hold_sblock = 0;
6081 	}
6082 	sbunlock(&so->so_rcv);
6083 	sockbuf_lock = 0;
6084 
6085 release_unlocked:
6086 	if (hold_sblock) {
6087 		SOCKBUF_UNLOCK(&so->so_rcv);
6088 		hold_sblock = 0;
6089 	}
6090 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6091 		if ((freed_so_far >= rwnd_req) &&
6092 		    (control && (control->do_not_ref_stcb == 0)) &&
6093 		    (no_rcv_needed == 0))
6094 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6095 	}
6096 out:
6097 	if (msg_flags) {
6098 		*msg_flags = out_flags;
6099 	}
6100 	if (((out_flags & MSG_EOR) == 0) &&
6101 	    ((in_flags & MSG_PEEK) == 0) &&
6102 	    (sinfo) &&
6103 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6104 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6105 		struct sctp_extrcvinfo *s_extra;
6106 
6107 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6108 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6109 	}
6110 	if (hold_rlock == 1) {
6111 		SCTP_INP_READ_UNLOCK(inp);
6112 	}
6113 	if (hold_sblock) {
6114 		SOCKBUF_UNLOCK(&so->so_rcv);
6115 	}
6116 	if (sockbuf_lock) {
6117 		sbunlock(&so->so_rcv);
6118 	}
6119 	if (freecnt_applied) {
6120 		/*
6121 		 * The lock on the socket buffer protects us so the free
6122 		 * code will stop. But since we used the socketbuf lock and
6123 		 * the sender uses the tcb_lock to increment, we need to use
6124 		 * the atomic add to the refcnt.
6125 		 */
6126 		if (stcb == NULL) {
6127 #ifdef INVARIANTS
6128 			panic("stcb for refcnt has gone NULL?");
6129 			goto stage_left;
6130 #else
6131 			goto stage_left;
6132 #endif
6133 		}
6134 		atomic_add_int(&stcb->asoc.refcnt, -1);
6135 		/* Save the value back for next time */
6136 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6137 	}
6138 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6139 		if (stcb) {
6140 			sctp_misc_ints(SCTP_SORECV_DONE,
6141 			    freed_so_far,
6142 			    ((uio) ? (slen - uio->uio_resid) : slen),
6143 			    stcb->asoc.my_rwnd,
6144 			    so->so_rcv.sb_cc);
6145 		} else {
6146 			sctp_misc_ints(SCTP_SORECV_DONE,
6147 			    freed_so_far,
6148 			    ((uio) ? (slen - uio->uio_resid) : slen),
6149 			    0,
6150 			    so->so_rcv.sb_cc);
6151 		}
6152 	}
6153 stage_left:
6154 	if (wakeup_read_socket) {
6155 		sctp_sorwakeup(inp, so);
6156 	}
6157 	return (error);
6158 }
6159 
6160 
6161 #ifdef SCTP_MBUF_LOGGING
6162 struct mbuf *
6163 sctp_m_free(struct mbuf *m)
6164 {
6165 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6166 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6167 	}
6168 	return (m_free(m));
6169 }
6170 
6171 void
6172 sctp_m_freem(struct mbuf *mb)
6173 {
6174 	while (mb != NULL)
6175 		mb = sctp_m_free(mb);
6176 }
6177 
6178 #endif
6179 
6180 int
6181 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6182 {
6183 	/*
6184 	 * Given a local address. For all associations that holds the
6185 	 * address, request a peer-set-primary.
6186 	 */
6187 	struct sctp_ifa *ifa;
6188 	struct sctp_laddr *wi;
6189 
6190 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6191 	if (ifa == NULL) {
6192 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6193 		return (EADDRNOTAVAIL);
6194 	}
6195 	/*
6196 	 * Now that we have the ifa we must awaken the iterator with this
6197 	 * message.
6198 	 */
6199 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6200 	if (wi == NULL) {
6201 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6202 		return (ENOMEM);
6203 	}
6204 	/* Now incr the count and int wi structure */
6205 	SCTP_INCR_LADDR_COUNT();
6206 	bzero(wi, sizeof(*wi));
6207 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6208 	wi->ifa = ifa;
6209 	wi->action = SCTP_SET_PRIM_ADDR;
6210 	atomic_add_int(&ifa->refcount, 1);
6211 
6212 	/* Now add it to the work queue */
6213 	SCTP_WQ_ADDR_LOCK();
6214 	/*
6215 	 * Should this really be a tailq? As it is we will process the
6216 	 * newest first :-0
6217 	 */
6218 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6219 	SCTP_WQ_ADDR_UNLOCK();
6220 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6221 	    (struct sctp_inpcb *)NULL,
6222 	    (struct sctp_tcb *)NULL,
6223 	    (struct sctp_nets *)NULL);
6224 	return (0);
6225 }
6226 
6227 
6228 int
6229 sctp_soreceive(struct socket *so,
6230     struct sockaddr **psa,
6231     struct uio *uio,
6232     struct mbuf **mp0,
6233     struct mbuf **controlp,
6234     int *flagsp)
6235 {
6236 	int error, fromlen;
6237 	uint8_t sockbuf[256];
6238 	struct sockaddr *from;
6239 	struct sctp_extrcvinfo sinfo;
6240 	int filling_sinfo = 1;
6241 	struct sctp_inpcb *inp;
6242 
6243 	inp = (struct sctp_inpcb *)so->so_pcb;
6244 	/* pickup the assoc we are reading from */
6245 	if (inp == NULL) {
6246 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6247 		return (EINVAL);
6248 	}
6249 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6250 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6251 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6252 	    (controlp == NULL)) {
6253 		/* user does not want the sndrcv ctl */
6254 		filling_sinfo = 0;
6255 	}
6256 	if (psa) {
6257 		from = (struct sockaddr *)sockbuf;
6258 		fromlen = sizeof(sockbuf);
6259 		from->sa_len = 0;
6260 	} else {
6261 		from = NULL;
6262 		fromlen = 0;
6263 	}
6264 
6265 	if (filling_sinfo) {
6266 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6267 	}
6268 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6269 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6270 	if (controlp != NULL) {
6271 		/* copy back the sinfo in a CMSG format */
6272 		if (filling_sinfo)
6273 			*controlp = sctp_build_ctl_nchunk(inp,
6274 			    (struct sctp_sndrcvinfo *)&sinfo);
6275 		else
6276 			*controlp = NULL;
6277 	}
6278 	if (psa) {
6279 		/* copy back the address info */
6280 		if (from && from->sa_len) {
6281 			*psa = sodupsockaddr(from, M_NOWAIT);
6282 		} else {
6283 			*psa = NULL;
6284 		}
6285 	}
6286 	return (error);
6287 }
6288 
6289 
6290 
6291 
6292 
/*
 * Add the addresses in the packed array 'addr' (totaddr entries, each a
 * sockaddr_in or sockaddr_in6 laid back to back) to association 'stcb'
 * as confirmed remote addresses.  Returns the number of addresses added
 * and sets *error to 0 on success or an errno value on failure.
 * NOTE(review): on any failure the association is destroyed via
 * sctp_free_assoc(), so the caller must not touch stcb when *error != 0.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject wildcard, broadcast and multicast peers. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast peers. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * Unknown family: 'incr' keeps its previous value
			 * (0 on the first iteration), so 'sa' may not
			 * advance.  Callers are expected to pass only
			 * validated AF_INET/AF_INET6 entries (see
			 * sctp_connectx_helper_find).
			 */
			break;
		}
		/* Step to the next packed sockaddr in the array. */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6368 
6369 struct sctp_tcb *
6370 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6371     int *totaddr, int *num_v4, int *num_v6, int *error,
6372     int limit, int *bad_addr)
6373 {
6374 	struct sockaddr *sa;
6375 	struct sctp_tcb *stcb = NULL;
6376 	size_t incr, at, i;
6377 
6378 	at = incr = 0;
6379 	sa = addr;
6380 
6381 	*error = *num_v6 = *num_v4 = 0;
6382 	/* account and validate addresses */
6383 	for (i = 0; i < (size_t)*totaddr; i++) {
6384 		switch (sa->sa_family) {
6385 #ifdef INET
6386 		case AF_INET:
6387 			(*num_v4) += 1;
6388 			incr = sizeof(struct sockaddr_in);
6389 			if (sa->sa_len != incr) {
6390 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6391 				*error = EINVAL;
6392 				*bad_addr = 1;
6393 				return (NULL);
6394 			}
6395 			break;
6396 #endif
6397 #ifdef INET6
6398 		case AF_INET6:
6399 			{
6400 				struct sockaddr_in6 *sin6;
6401 
6402 				sin6 = (struct sockaddr_in6 *)sa;
6403 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6404 					/* Must be non-mapped for connectx */
6405 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6406 					*error = EINVAL;
6407 					*bad_addr = 1;
6408 					return (NULL);
6409 				}
6410 				(*num_v6) += 1;
6411 				incr = sizeof(struct sockaddr_in6);
6412 				if (sa->sa_len != incr) {
6413 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6414 					*error = EINVAL;
6415 					*bad_addr = 1;
6416 					return (NULL);
6417 				}
6418 				break;
6419 			}
6420 #endif
6421 		default:
6422 			*totaddr = i;
6423 			/* we are done */
6424 			break;
6425 		}
6426 		if (i == (size_t)*totaddr) {
6427 			break;
6428 		}
6429 		SCTP_INP_INCR_REF(inp);
6430 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6431 		if (stcb != NULL) {
6432 			/* Already have or am bring up an association */
6433 			return (stcb);
6434 		} else {
6435 			SCTP_INP_DECR_REF(inp);
6436 		}
6437 		if ((at + incr) > (size_t)limit) {
6438 			*totaddr = i;
6439 			break;
6440 		}
6441 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6442 	}
6443 	return ((struct sctp_tcb *)NULL);
6444 }
6445 
6446 /*
6447  * sctp_bindx(ADD) for one address.
6448  * assumes all arguments are valid/checked by caller.
6449  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#if defined(INET) && defined(INET6)
	/* Scratch space for converting a v4-mapped v6 address to v4. */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* bindx makes no sense on a wildcard-bound endpoint */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on a v6-only socket */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Treat the mapped address as plain IPv4 from here on. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on a v6-only socket */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* Endpoint not yet bound: this first address becomes the bind. */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;

		/*
		 * NOTE(review): addr_touse may actually point at a
		 * sockaddr_in6 here; the cast works for the port handling
		 * below because sin_port and sin6_port occupy the same
		 * offset in both structures.
		 */
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Address is free: add it to this endpoint. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* Some other endpoint owns this address/port. */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6577 
6578 /*
6579  * sctp_bindx(DELETE) for one address.
6580  * assumes all arguments are valid/checked by caller.
6581  */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#if defined(INET) && defined(INET6)
	/* Scratch space for converting a v4-mapped v6 address to v4. */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* bindx makes no sense on a wildcard-bound endpoint */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on a v6-only socket */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Treat the mapped address as plain IPv4 from here on. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on a v6-only socket */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6667 
6668 /*
6669  * returns the valid local address count for an assoc, taking into account
6670  * all scoping rules
6671  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope;

#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;

#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;

#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				/* Loopback interfaces are out of scope. */
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* Skip addresses not visible in our jail. */
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							/* Private addrs are out of scope. */
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						/* Skip addresses not visible in our jail. */
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6817 
6818 #if defined(SCTP_LOCAL_TRACE_BUF)
6819 
6820 void
6821 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6822 {
6823 	uint32_t saveindex, newindex;
6824 
6825 	do {
6826 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6827 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6828 			newindex = 1;
6829 		} else {
6830 			newindex = saveindex + 1;
6831 		}
6832 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6833 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6834 		saveindex = 0;
6835 	}
6836 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6837 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6838 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6839 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6840 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6841 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6842 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6843 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6844 }
6845 
6846 #endif
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	/*
	 * Input hook for the SCTP-over-UDP tunneling sockets: 'm' is a
	 * UDP datagram whose IP + UDP headers end at offset 'off'.  Strip
	 * the UDP header out of the chain, fix up the IP length field, and
	 * hand the remaining SCTP packet to the regular v4/v6 SCTP input
	 * path, tagged with the UDP source port.  'm' is consumed on every
	 * path (passed on, or freed via 'out').
	 */
	struct ip *iph;

#ifdef INET6
	struct ip6_hdr *ip6;

#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/*
	 * Pull the src port.  uh_sport stays in network byte order and is
	 * passed through as-is.  NOTE(review): this assumes the first mbuf
	 * holds at least off + sizeof(struct udphdr) contiguous bytes -
	 * confirm the UDP tunneling input path guarantees that.
	 */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed (m_pullup freed the chain); drop m too */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain: append sp after the IP header. */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* Shrink the IP total length to account for the removed UDP header. */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		/* Same adjustment for the IPv6 payload length. */
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		/* Unknown IP version: drop. */
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6919 
void
sctp_over_udp_stop(void)
{
#if defined(INET) || defined(INET6)
	struct socket *so;
#endif

	/*
	 * Tear down the SCTP-over-UDP tunneling sockets.  The sysctl
	 * caller is assumed to hold sctp_sysctl_info_lock() exclusively
	 * (for writing).
	 */
#ifdef INET
	so = SCTP_BASE_INFO(udp4_tun_socket);
	if (so != NULL) {
		soclose(so);
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	so = SCTP_BASE_INFO(udp6_tun_socket);
	if (so != NULL) {
		soclose(so);
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
6940 
6941 int
6942 sctp_over_udp_start(void)
6943 {
6944 	uint16_t port;
6945 	int ret;
6946 
6947 #ifdef INET
6948 	struct sockaddr_in sin;
6949 
6950 #endif
6951 #ifdef INET6
6952 	struct sockaddr_in6 sin6;
6953 
6954 #endif
6955 	/*
6956 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6957 	 * for writting!
6958 	 */
6959 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
6960 	if (ntohs(port) == 0) {
6961 		/* Must have a port set */
6962 		return (EINVAL);
6963 	}
6964 #ifdef INET
6965 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
6966 		/* Already running -- must stop first */
6967 		return (EALREADY);
6968 	}
6969 #endif
6970 #ifdef INET6
6971 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
6972 		/* Already running -- must stop first */
6973 		return (EALREADY);
6974 	}
6975 #endif
6976 #ifdef INET
6977 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
6978 	    SOCK_DGRAM, IPPROTO_UDP,
6979 	    curthread->td_ucred, curthread))) {
6980 		sctp_over_udp_stop();
6981 		return (ret);
6982 	}
6983 	/* Call the special UDP hook. */
6984 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
6985 	    sctp_recv_udp_tunneled_packet, NULL))) {
6986 		sctp_over_udp_stop();
6987 		return (ret);
6988 	}
6989 	/* Ok, we have a socket, bind it to the port. */
6990 	memset(&sin, 0, sizeof(struct sockaddr_in));
6991 	sin.sin_len = sizeof(struct sockaddr_in);
6992 	sin.sin_family = AF_INET;
6993 	sin.sin_port = htons(port);
6994 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
6995 	    (struct sockaddr *)&sin, curthread))) {
6996 		sctp_over_udp_stop();
6997 		return (ret);
6998 	}
6999 #endif
7000 #ifdef INET6
7001 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7002 	    SOCK_DGRAM, IPPROTO_UDP,
7003 	    curthread->td_ucred, curthread))) {
7004 		sctp_over_udp_stop();
7005 		return (ret);
7006 	}
7007 	/* Call the special UDP hook. */
7008 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7009 	    sctp_recv_udp_tunneled_packet, NULL))) {
7010 		sctp_over_udp_stop();
7011 		return (ret);
7012 	}
7013 	/* Ok, we have a socket, bind it to the port. */
7014 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7015 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7016 	sin6.sin6_family = AF_INET6;
7017 	sin6.sin6_port = htons(port);
7018 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7019 	    (struct sockaddr *)&sin6, curthread))) {
7020 		sctp_over_udp_stop();
7021 		return (ret);
7022 	}
7023 #endif
7024 	return (0);
7025 }
7026