xref: /freebsd/sys/netinet/sctputil.c (revision a5ff72cb0e51a7675d4e2b5810a2b6dad5b91960)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55 
56 
57 #ifndef KTR_SCTP
58 #define KTR_SCTP KTR_SUBSYS
59 #endif
60 
61 extern const struct sctp_cc_functions sctp_cc_functions[];
62 extern const struct sctp_ss_functions sctp_ss_functions[];
63 
64 void
65 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
66 {
67 	struct sctp_cwnd_log sctp_clog;
68 
69 	sctp_clog.x.sb.stcb = stcb;
70 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
71 	if (stcb)
72 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
73 	else
74 		sctp_clog.x.sb.stcb_sbcc = 0;
75 	sctp_clog.x.sb.incr = incr;
76 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
77 	    SCTP_LOG_EVENT_SB,
78 	    from,
79 	    sctp_clog.x.misc.log1,
80 	    sctp_clog.x.misc.log2,
81 	    sctp_clog.x.misc.log3,
82 	    sctp_clog.x.misc.log4);
83 }
84 
85 void
86 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
87 {
88 	struct sctp_cwnd_log sctp_clog;
89 
90 	sctp_clog.x.close.inp = (void *)inp;
91 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
92 	if (stcb) {
93 		sctp_clog.x.close.stcb = (void *)stcb;
94 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
95 	} else {
96 		sctp_clog.x.close.stcb = 0;
97 		sctp_clog.x.close.state = 0;
98 	}
99 	sctp_clog.x.close.loc = loc;
100 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
101 	    SCTP_LOG_EVENT_CLOSE,
102 	    0,
103 	    sctp_clog.x.misc.log1,
104 	    sctp_clog.x.misc.log2,
105 	    sctp_clog.x.misc.log3,
106 	    sctp_clog.x.misc.log4);
107 }
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 }
125 
126 void
127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128 {
129 	struct sctp_cwnd_log sctp_clog;
130 
131 	sctp_clog.x.strlog.stcb = stcb;
132 	sctp_clog.x.strlog.n_tsn = tsn;
133 	sctp_clog.x.strlog.n_sseq = sseq;
134 	sctp_clog.x.strlog.e_tsn = 0;
135 	sctp_clog.x.strlog.e_sseq = 0;
136 	sctp_clog.x.strlog.strm = stream;
137 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138 	    SCTP_LOG_EVENT_STRM,
139 	    from,
140 	    sctp_clog.x.misc.log1,
141 	    sctp_clog.x.misc.log2,
142 	    sctp_clog.x.misc.log3,
143 	    sctp_clog.x.misc.log4);
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 void
166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167 {
168 	struct sctp_cwnd_log sctp_clog;
169 
170 	sctp_clog.x.sack.cumack = cumack;
171 	sctp_clog.x.sack.oldcumack = old_cumack;
172 	sctp_clog.x.sack.tsn = tsn;
173 	sctp_clog.x.sack.numGaps = gaps;
174 	sctp_clog.x.sack.numDups = dups;
175 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176 	    SCTP_LOG_EVENT_SACK,
177 	    from,
178 	    sctp_clog.x.misc.log1,
179 	    sctp_clog.x.misc.log2,
180 	    sctp_clog.x.misc.log3,
181 	    sctp_clog.x.misc.log4);
182 }
183 
184 void
185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186 {
187 	struct sctp_cwnd_log sctp_clog;
188 
189 	memset(&sctp_clog, 0, sizeof(sctp_clog));
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
204 {
205 	struct sctp_cwnd_log sctp_clog;
206 
207 	memset(&sctp_clog, 0, sizeof(sctp_clog));
208 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210 	sctp_clog.x.fr.tsn = tsn;
211 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212 	    SCTP_LOG_EVENT_FR,
213 	    from,
214 	    sctp_clog.x.misc.log1,
215 	    sctp_clog.x.misc.log2,
216 	    sctp_clog.x.misc.log3,
217 	    sctp_clog.x.misc.log4);
218 }
219 
220 #ifdef SCTP_MBUF_LOGGING
221 void
222 sctp_log_mb(struct mbuf *m, int from)
223 {
224 	struct sctp_cwnd_log sctp_clog;
225 
226 	sctp_clog.x.mb.mp = m;
227 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
228 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
229 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
230 	if (SCTP_BUF_IS_EXTENDED(m)) {
231 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
232 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
233 	} else {
234 		sctp_clog.x.mb.ext = 0;
235 		sctp_clog.x.mb.refcnt = 0;
236 	}
237 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
238 	    SCTP_LOG_EVENT_MBUF,
239 	    from,
240 	    sctp_clog.x.misc.log1,
241 	    sctp_clog.x.misc.log2,
242 	    sctp_clog.x.misc.log3,
243 	    sctp_clog.x.misc.log4);
244 }
245 
246 void
247 sctp_log_mbc(struct mbuf *m, int from)
248 {
249 	struct mbuf *mat;
250 
251 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
252 		sctp_log_mb(mat, from);
253 	}
254 }
255 
256 #endif
257 
258 void
259 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
260 {
261 	struct sctp_cwnd_log sctp_clog;
262 
263 	if (control == NULL) {
264 		SCTP_PRINTF("Gak log of NULL?\n");
265 		return;
266 	}
267 	sctp_clog.x.strlog.stcb = control->stcb;
268 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
269 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
270 	sctp_clog.x.strlog.strm = control->sinfo_stream;
271 	if (poschk != NULL) {
272 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
273 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
274 	} else {
275 		sctp_clog.x.strlog.e_tsn = 0;
276 		sctp_clog.x.strlog.e_sseq = 0;
277 	}
278 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
279 	    SCTP_LOG_EVENT_STRM,
280 	    from,
281 	    sctp_clog.x.misc.log1,
282 	    sctp_clog.x.misc.log2,
283 	    sctp_clog.x.misc.log3,
284 	    sctp_clog.x.misc.log4);
285 }
286 
287 void
288 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
289 {
290 	struct sctp_cwnd_log sctp_clog;
291 
292 	sctp_clog.x.cwnd.net = net;
293 	if (stcb->asoc.send_queue_cnt > 255)
294 		sctp_clog.x.cwnd.cnt_in_send = 255;
295 	else
296 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
297 	if (stcb->asoc.stream_queue_cnt > 255)
298 		sctp_clog.x.cwnd.cnt_in_str = 255;
299 	else
300 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
301 
302 	if (net) {
303 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
304 		sctp_clog.x.cwnd.inflight = net->flight_size;
305 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
306 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
307 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
308 	}
309 	if (SCTP_CWNDLOG_PRESEND == from) {
310 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
311 	}
312 	sctp_clog.x.cwnd.cwnd_augment = augment;
313 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
314 	    SCTP_LOG_EVENT_CWND,
315 	    from,
316 	    sctp_clog.x.misc.log1,
317 	    sctp_clog.x.misc.log2,
318 	    sctp_clog.x.misc.log3,
319 	    sctp_clog.x.misc.log4);
320 }
321 
322 void
323 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
324 {
325 	struct sctp_cwnd_log sctp_clog;
326 
327 	memset(&sctp_clog, 0, sizeof(sctp_clog));
328 	if (inp) {
329 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
330 
331 	} else {
332 		sctp_clog.x.lock.sock = (void *)NULL;
333 	}
334 	sctp_clog.x.lock.inp = (void *)inp;
335 	if (stcb) {
336 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
337 	} else {
338 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
339 	}
340 	if (inp) {
341 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
342 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
343 	} else {
344 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
345 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
346 	}
347 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
348 	if (inp && (inp->sctp_socket)) {
349 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
350 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
351 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
352 	} else {
353 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
354 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
355 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
356 	}
357 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
358 	    SCTP_LOG_LOCK_EVENT,
359 	    from,
360 	    sctp_clog.x.misc.log1,
361 	    sctp_clog.x.misc.log2,
362 	    sctp_clog.x.misc.log3,
363 	    sctp_clog.x.misc.log4);
364 }
365 
366 void
367 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
368 {
369 	struct sctp_cwnd_log sctp_clog;
370 
371 	memset(&sctp_clog, 0, sizeof(sctp_clog));
372 	sctp_clog.x.cwnd.net = net;
373 	sctp_clog.x.cwnd.cwnd_new_value = error;
374 	sctp_clog.x.cwnd.inflight = net->flight_size;
375 	sctp_clog.x.cwnd.cwnd_augment = burst;
376 	if (stcb->asoc.send_queue_cnt > 255)
377 		sctp_clog.x.cwnd.cnt_in_send = 255;
378 	else
379 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
380 	if (stcb->asoc.stream_queue_cnt > 255)
381 		sctp_clog.x.cwnd.cnt_in_str = 255;
382 	else
383 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
384 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
385 	    SCTP_LOG_EVENT_MAXBURST,
386 	    from,
387 	    sctp_clog.x.misc.log1,
388 	    sctp_clog.x.misc.log2,
389 	    sctp_clog.x.misc.log3,
390 	    sctp_clog.x.misc.log4);
391 }
392 
393 void
394 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
395 {
396 	struct sctp_cwnd_log sctp_clog;
397 
398 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
399 	sctp_clog.x.rwnd.send_size = snd_size;
400 	sctp_clog.x.rwnd.overhead = overhead;
401 	sctp_clog.x.rwnd.new_rwnd = 0;
402 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
403 	    SCTP_LOG_EVENT_RWND,
404 	    from,
405 	    sctp_clog.x.misc.log1,
406 	    sctp_clog.x.misc.log2,
407 	    sctp_clog.x.misc.log3,
408 	    sctp_clog.x.misc.log4);
409 }
410 
411 void
412 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
413 {
414 	struct sctp_cwnd_log sctp_clog;
415 
416 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
417 	sctp_clog.x.rwnd.send_size = flight_size;
418 	sctp_clog.x.rwnd.overhead = overhead;
419 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
420 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
421 	    SCTP_LOG_EVENT_RWND,
422 	    from,
423 	    sctp_clog.x.misc.log1,
424 	    sctp_clog.x.misc.log2,
425 	    sctp_clog.x.misc.log3,
426 	    sctp_clog.x.misc.log4);
427 }
428 
429 #ifdef SCTP_MBCNT_LOGGING
430 static void
431 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
432 {
433 	struct sctp_cwnd_log sctp_clog;
434 
435 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
436 	sctp_clog.x.mbcnt.size_change = book;
437 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
438 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
439 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
440 	    SCTP_LOG_EVENT_MBCNT,
441 	    from,
442 	    sctp_clog.x.misc.log1,
443 	    sctp_clog.x.misc.log2,
444 	    sctp_clog.x.misc.log3,
445 	    sctp_clog.x.misc.log4);
446 }
447 
448 #endif
449 
450 void
451 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
452 {
453 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
454 	    SCTP_LOG_MISC_EVENT,
455 	    from,
456 	    a, b, c, d);
457 }
458 
459 void
460 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
461 {
462 	struct sctp_cwnd_log sctp_clog;
463 
464 	sctp_clog.x.wake.stcb = (void *)stcb;
465 	sctp_clog.x.wake.wake_cnt = wake_cnt;
466 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
467 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
468 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
469 
470 	if (stcb->asoc.stream_queue_cnt < 0xff)
471 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
472 	else
473 		sctp_clog.x.wake.stream_qcnt = 0xff;
474 
475 	if (stcb->asoc.chunks_on_out_queue < 0xff)
476 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
477 	else
478 		sctp_clog.x.wake.chunks_on_oque = 0xff;
479 
480 	sctp_clog.x.wake.sctpflags = 0;
481 	/* set in the defered mode stuff */
482 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
483 		sctp_clog.x.wake.sctpflags |= 1;
484 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
485 		sctp_clog.x.wake.sctpflags |= 2;
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
487 		sctp_clog.x.wake.sctpflags |= 4;
488 	/* what about the sb */
489 	if (stcb->sctp_socket) {
490 		struct socket *so = stcb->sctp_socket;
491 
492 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
493 	} else {
494 		sctp_clog.x.wake.sbflags = 0xff;
495 	}
496 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
497 	    SCTP_LOG_EVENT_WAKE,
498 	    from,
499 	    sctp_clog.x.misc.log1,
500 	    sctp_clog.x.misc.log2,
501 	    sctp_clog.x.misc.log3,
502 	    sctp_clog.x.misc.log4);
503 }
504 
505 void
506 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
507 {
508 	struct sctp_cwnd_log sctp_clog;
509 
510 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
511 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
512 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
513 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
514 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
515 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
516 	sctp_clog.x.blk.sndlen = (uint32_t) sendlen;
517 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
518 	    SCTP_LOG_EVENT_BLOCK,
519 	    from,
520 	    sctp_clog.x.misc.log1,
521 	    sctp_clog.x.misc.log2,
522 	    sctp_clog.x.misc.log3,
523 	    sctp_clog.x.misc.log4);
524 }
525 
526 int
527 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
528 {
529 	/* May need to fix this if ktrdump does not work */
530 	return (0);
531 }
532 
533 #ifdef SCTP_AUDITING_ENABLED
534 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
535 static int sctp_audit_indx = 0;
536 
537 static
538 void
539 sctp_print_audit_report(void)
540 {
541 	int i;
542 	int cnt;
543 
544 	cnt = 0;
545 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
546 		if ((sctp_audit_data[i][0] == 0xe0) &&
547 		    (sctp_audit_data[i][1] == 0x01)) {
548 			cnt = 0;
549 			SCTP_PRINTF("\n");
550 		} else if (sctp_audit_data[i][0] == 0xf0) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
554 		    (sctp_audit_data[i][1] == 0x01)) {
555 			SCTP_PRINTF("\n");
556 			cnt = 0;
557 		}
558 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
559 		    (uint32_t) sctp_audit_data[i][1]);
560 		cnt++;
561 		if ((cnt % 14) == 0)
562 			SCTP_PRINTF("\n");
563 	}
564 	for (i = 0; i < sctp_audit_indx; i++) {
565 		if ((sctp_audit_data[i][0] == 0xe0) &&
566 		    (sctp_audit_data[i][1] == 0x01)) {
567 			cnt = 0;
568 			SCTP_PRINTF("\n");
569 		} else if (sctp_audit_data[i][0] == 0xf0) {
570 			cnt = 0;
571 			SCTP_PRINTF("\n");
572 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
573 		    (sctp_audit_data[i][1] == 0x01)) {
574 			SCTP_PRINTF("\n");
575 			cnt = 0;
576 		}
577 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
578 		    (uint32_t) sctp_audit_data[i][1]);
579 		cnt++;
580 		if ((cnt % 14) == 0)
581 			SCTP_PRINTF("\n");
582 	}
583 	SCTP_PRINTF("\n");
584 }
585 
586 void
587 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
588     struct sctp_nets *net)
589 {
590 	int resend_cnt, tot_out, rep, tot_book_cnt;
591 	struct sctp_nets *lnet;
592 	struct sctp_tmit_chunk *chk;
593 
594 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
595 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
596 	sctp_audit_indx++;
597 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
598 		sctp_audit_indx = 0;
599 	}
600 	if (inp == NULL) {
601 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
602 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
603 		sctp_audit_indx++;
604 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
605 			sctp_audit_indx = 0;
606 		}
607 		return;
608 	}
609 	if (stcb == NULL) {
610 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
611 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
612 		sctp_audit_indx++;
613 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
614 			sctp_audit_indx = 0;
615 		}
616 		return;
617 	}
618 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
619 	sctp_audit_data[sctp_audit_indx][1] =
620 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
621 	sctp_audit_indx++;
622 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
623 		sctp_audit_indx = 0;
624 	}
625 	rep = 0;
626 	tot_book_cnt = 0;
627 	resend_cnt = tot_out = 0;
628 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
629 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
630 			resend_cnt++;
631 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
632 			tot_out += chk->book_size;
633 			tot_book_cnt++;
634 		}
635 	}
636 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
637 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
638 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
639 		sctp_audit_indx++;
640 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
641 			sctp_audit_indx = 0;
642 		}
643 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
644 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
645 		rep = 1;
646 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
647 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
648 		sctp_audit_data[sctp_audit_indx][1] =
649 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
650 		sctp_audit_indx++;
651 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
652 			sctp_audit_indx = 0;
653 		}
654 	}
655 	if (tot_out != stcb->asoc.total_flight) {
656 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
657 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
658 		sctp_audit_indx++;
659 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
660 			sctp_audit_indx = 0;
661 		}
662 		rep = 1;
663 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
664 		    (int)stcb->asoc.total_flight);
665 		stcb->asoc.total_flight = tot_out;
666 	}
667 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
668 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
669 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
670 		sctp_audit_indx++;
671 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
672 			sctp_audit_indx = 0;
673 		}
674 		rep = 1;
675 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
676 
677 		stcb->asoc.total_flight_count = tot_book_cnt;
678 	}
679 	tot_out = 0;
680 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
681 		tot_out += lnet->flight_size;
682 	}
683 	if (tot_out != stcb->asoc.total_flight) {
684 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
685 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
686 		sctp_audit_indx++;
687 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
688 			sctp_audit_indx = 0;
689 		}
690 		rep = 1;
691 		SCTP_PRINTF("real flight:%d net total was %d\n",
692 		    stcb->asoc.total_flight, tot_out);
693 		/* now corrective action */
694 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
695 
696 			tot_out = 0;
697 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
698 				if ((chk->whoTo == lnet) &&
699 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
700 					tot_out += chk->book_size;
701 				}
702 			}
703 			if (lnet->flight_size != tot_out) {
704 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
705 				    (void *)lnet, lnet->flight_size,
706 				    tot_out);
707 				lnet->flight_size = tot_out;
708 			}
709 		}
710 	}
711 	if (rep) {
712 		sctp_print_audit_report();
713 	}
714 }
715 
716 void
717 sctp_audit_log(uint8_t ev, uint8_t fd)
718 {
719 
720 	sctp_audit_data[sctp_audit_indx][0] = ev;
721 	sctp_audit_data[sctp_audit_indx][1] = fd;
722 	sctp_audit_indx++;
723 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
724 		sctp_audit_indx = 0;
725 	}
726 }
727 
728 #endif
729 
730 /*
731  * sctp_stop_timers_for_shutdown() should be called
732  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
733  * state to make sure that all timers are stopped.
734  */
735 void
736 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
737 {
738 	struct sctp_association *asoc;
739 	struct sctp_nets *net;
740 
741 	asoc = &stcb->asoc;
742 
743 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
744 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
745 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
746 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
747 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
748 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
749 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
750 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
751 	}
752 }
753 
754 /*
755  * a list of sizes based on typical mtu's, used only if next hop size not
756  * returned.
757  */
758 static uint32_t sctp_mtu_sizes[] = {
759 	68,
760 	296,
761 	508,
762 	512,
763 	544,
764 	576,
765 	1006,
766 	1492,
767 	1500,
768 	1536,
769 	2002,
770 	2048,
771 	4352,
772 	4464,
773 	8166,
774 	17914,
775 	32000,
776 	65535
777 };
778 
779 /*
780  * Return the largest MTU smaller than val. If there is no
781  * entry, just return val.
782  */
783 uint32_t
784 sctp_get_prev_mtu(uint32_t val)
785 {
786 	uint32_t i;
787 
788 	if (val <= sctp_mtu_sizes[0]) {
789 		return (val);
790 	}
791 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
792 		if (val <= sctp_mtu_sizes[i]) {
793 			break;
794 		}
795 	}
796 	return (sctp_mtu_sizes[i - 1]);
797 }
798 
799 /*
800  * Return the smallest MTU larger than val. If there is no
801  * entry, just return val.
802  */
803 uint32_t
804 sctp_get_next_mtu(uint32_t val)
805 {
806 	/* select another MTU that is just bigger than this one */
807 	uint32_t i;
808 
809 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
810 		if (val < sctp_mtu_sizes[i]) {
811 			return (sctp_mtu_sizes[i]);
812 		}
813 	}
814 	return (val);
815 }
816 
817 void
818 sctp_fill_random_store(struct sctp_pcb *m)
819 {
820 	/*
821 	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
822 	 * our counter. The result becomes our good random numbers and we
823 	 * then setup to give these out. Note that we do no locking to
824 	 * protect this. This is ok, since if competing folks call this we
825 	 * will get more gobbled gook in the random store which is what we
826 	 * want. There is a danger that two guys will use the same random
827 	 * numbers, but thats ok too since that is random as well :->
828 	 */
829 	m->store_at = 0;
830 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
831 	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
832 	    sizeof(m->random_counter), (uint8_t *) m->random_store);
833 	m->random_counter++;
834 }
835 
836 uint32_t
837 sctp_select_initial_TSN(struct sctp_pcb *inp)
838 {
839 	/*
840 	 * A true implementation should use random selection process to get
841 	 * the initial stream sequence number, using RFC1750 as a good
842 	 * guideline
843 	 */
844 	uint32_t x, *xp;
845 	uint8_t *p;
846 	int store_at, new_store;
847 
848 	if (inp->initial_sequence_debug != 0) {
849 		uint32_t ret;
850 
851 		ret = inp->initial_sequence_debug;
852 		inp->initial_sequence_debug++;
853 		return (ret);
854 	}
855 retry:
856 	store_at = inp->store_at;
857 	new_store = store_at + sizeof(uint32_t);
858 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
859 		new_store = 0;
860 	}
861 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
862 		goto retry;
863 	}
864 	if (new_store == 0) {
865 		/* Refill the random store */
866 		sctp_fill_random_store(inp);
867 	}
868 	p = &inp->random_store[store_at];
869 	xp = (uint32_t *) p;
870 	x = *xp;
871 	return (x);
872 }
873 
874 uint32_t
875 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
876 {
877 	uint32_t x;
878 	struct timeval now;
879 
880 	if (check) {
881 		(void)SCTP_GETTIME_TIMEVAL(&now);
882 	}
883 	for (;;) {
884 		x = sctp_select_initial_TSN(&inp->sctp_ep);
885 		if (x == 0) {
886 			/* we never use 0 */
887 			continue;
888 		}
889 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
890 			break;
891 		}
892 	}
893 	return (x);
894 }
895 
896 int32_t
897 sctp_map_assoc_state(int kernel_state)
898 {
899 	int32_t user_state;
900 
901 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
902 		user_state = SCTP_CLOSED;
903 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
904 		user_state = SCTP_SHUTDOWN_PENDING;
905 	} else {
906 		switch (kernel_state & SCTP_STATE_MASK) {
907 		case SCTP_STATE_EMPTY:
908 			user_state = SCTP_CLOSED;
909 			break;
910 		case SCTP_STATE_INUSE:
911 			user_state = SCTP_CLOSED;
912 			break;
913 		case SCTP_STATE_COOKIE_WAIT:
914 			user_state = SCTP_COOKIE_WAIT;
915 			break;
916 		case SCTP_STATE_COOKIE_ECHOED:
917 			user_state = SCTP_COOKIE_ECHOED;
918 			break;
919 		case SCTP_STATE_OPEN:
920 			user_state = SCTP_ESTABLISHED;
921 			break;
922 		case SCTP_STATE_SHUTDOWN_SENT:
923 			user_state = SCTP_SHUTDOWN_SENT;
924 			break;
925 		case SCTP_STATE_SHUTDOWN_RECEIVED:
926 			user_state = SCTP_SHUTDOWN_RECEIVED;
927 			break;
928 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
929 			user_state = SCTP_SHUTDOWN_ACK_SENT;
930 			break;
931 		default:
932 			user_state = SCTP_CLOSED;
933 			break;
934 		}
935 	}
936 	return (user_state);
937 }
938 
939 int
940 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
941     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
942 {
943 	struct sctp_association *asoc;
944 
945 	/*
946 	 * Anything set to zero is taken care of by the allocation routine's
947 	 * bzero
948 	 */
949 
950 	/*
951 	 * Up front select what scoping to apply on addresses I tell my peer
952 	 * Not sure what to do with these right now, we will need to come up
953 	 * with a way to set them. We may need to pass them through from the
954 	 * caller in the sctp_aloc_assoc() function.
955 	 */
956 	int i;
957 
958 #if defined(SCTP_DETAILED_STR_STATS)
959 	int j;
960 
961 #endif
962 
963 	asoc = &stcb->asoc;
964 	/* init all variables to a known value. */
965 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
966 	asoc->max_burst = inp->sctp_ep.max_burst;
967 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
968 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
969 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
970 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
971 	asoc->ecn_supported = inp->ecn_supported;
972 	asoc->prsctp_supported = inp->prsctp_supported;
973 	asoc->idata_supported = inp->idata_supported;
974 	asoc->auth_supported = inp->auth_supported;
975 	asoc->asconf_supported = inp->asconf_supported;
976 	asoc->reconfig_supported = inp->reconfig_supported;
977 	asoc->nrsack_supported = inp->nrsack_supported;
978 	asoc->pktdrop_supported = inp->pktdrop_supported;
979 	asoc->idata_supported = inp->idata_supported;
980 	asoc->sctp_cmt_pf = (uint8_t) 0;
981 	asoc->sctp_frag_point = inp->sctp_frag_point;
982 	asoc->sctp_features = inp->sctp_features;
983 	asoc->default_dscp = inp->sctp_ep.default_dscp;
984 	asoc->max_cwnd = inp->max_cwnd;
985 #ifdef INET6
986 	if (inp->sctp_ep.default_flowlabel) {
987 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
988 	} else {
989 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
990 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
991 			asoc->default_flowlabel &= 0x000fffff;
992 			asoc->default_flowlabel |= 0x80000000;
993 		} else {
994 			asoc->default_flowlabel = 0;
995 		}
996 	}
997 #endif
998 	asoc->sb_send_resv = 0;
999 	if (override_tag) {
1000 		asoc->my_vtag = override_tag;
1001 	} else {
1002 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1003 	}
1004 	/* Get the nonce tags */
1005 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1006 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1007 	asoc->vrf_id = vrf_id;
1008 
1009 #ifdef SCTP_ASOCLOG_OF_TSNS
1010 	asoc->tsn_in_at = 0;
1011 	asoc->tsn_out_at = 0;
1012 	asoc->tsn_in_wrapped = 0;
1013 	asoc->tsn_out_wrapped = 0;
1014 	asoc->cumack_log_at = 0;
1015 	asoc->cumack_log_atsnt = 0;
1016 #endif
1017 #ifdef SCTP_FS_SPEC_LOG
1018 	asoc->fs_index = 0;
1019 #endif
1020 	asoc->refcnt = 0;
1021 	asoc->assoc_up_sent = 0;
1022 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1023 	    sctp_select_initial_TSN(&inp->sctp_ep);
1024 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1025 	/* we are optimisitic here */
1026 	asoc->peer_supports_nat = 0;
1027 	asoc->sent_queue_retran_cnt = 0;
1028 
1029 	/* for CMT */
1030 	asoc->last_net_cmt_send_started = NULL;
1031 
1032 	/* This will need to be adjusted */
1033 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1034 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1035 	asoc->asconf_seq_in = asoc->last_acked_seq;
1036 
1037 	/* here we are different, we hold the next one we expect */
1038 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1039 
1040 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1041 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1042 
1043 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1044 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1045 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1046 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1047 	asoc->free_chunk_cnt = 0;
1048 
1049 	asoc->iam_blocking = 0;
1050 	asoc->context = inp->sctp_context;
1051 	asoc->local_strreset_support = inp->local_strreset_support;
1052 	asoc->def_send = inp->def_send;
1053 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1054 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1055 	asoc->pr_sctp_cnt = 0;
1056 	asoc->total_output_queue_size = 0;
1057 
1058 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1059 		asoc->scope.ipv6_addr_legal = 1;
1060 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1061 			asoc->scope.ipv4_addr_legal = 1;
1062 		} else {
1063 			asoc->scope.ipv4_addr_legal = 0;
1064 		}
1065 	} else {
1066 		asoc->scope.ipv6_addr_legal = 0;
1067 		asoc->scope.ipv4_addr_legal = 1;
1068 	}
1069 
1070 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1071 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1072 
1073 	asoc->smallest_mtu = inp->sctp_frag_point;
1074 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1075 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1076 
1077 	asoc->locked_on_sending = NULL;
1078 	asoc->stream_locked_on = 0;
1079 	asoc->ecn_echo_cnt_onq = 0;
1080 	asoc->stream_locked = 0;
1081 
1082 	asoc->send_sack = 1;
1083 
1084 	LIST_INIT(&asoc->sctp_restricted_addrs);
1085 
1086 	TAILQ_INIT(&asoc->nets);
1087 	TAILQ_INIT(&asoc->pending_reply_queue);
1088 	TAILQ_INIT(&asoc->asconf_ack_sent);
1089 	/* Setup to fill the hb random cache at first HB */
1090 	asoc->hb_random_idx = 4;
1091 
1092 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1093 
1094 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1095 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1096 
1097 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1098 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1099 
1100 	/*
1101 	 * Now the stream parameters, here we allocate space for all streams
1102 	 * that we request by default.
1103 	 */
1104 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1105 	    o_strms;
1106 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1107 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1108 	    SCTP_M_STRMO);
1109 	if (asoc->strmout == NULL) {
1110 		/* big trouble no memory */
1111 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1112 		return (ENOMEM);
1113 	}
1114 	for (i = 0; i < asoc->streamoutcnt; i++) {
1115 		/*
1116 		 * inbound side must be set to 0xffff, also NOTE when we get
1117 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1118 		 * count (streamoutcnt) but first check if we sent to any of
1119 		 * the upper streams that were dropped (if some were). Those
1120 		 * that were dropped must be notified to the upper layer as
1121 		 * failed to send.
1122 		 */
1123 		asoc->strmout[i].next_sequence_send = 0x0;
1124 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1125 		asoc->strmout[i].chunks_on_queues = 0;
1126 #if defined(SCTP_DETAILED_STR_STATS)
1127 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1128 			asoc->strmout[i].abandoned_sent[j] = 0;
1129 			asoc->strmout[i].abandoned_unsent[j] = 0;
1130 		}
1131 #else
1132 		asoc->strmout[i].abandoned_sent[0] = 0;
1133 		asoc->strmout[i].abandoned_unsent[0] = 0;
1134 #endif
1135 		asoc->strmout[i].stream_no = i;
1136 		asoc->strmout[i].last_msg_incomplete = 0;
1137 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1138 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1139 	}
1140 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1141 
1142 	/* Now the mapping array */
1143 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1144 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1145 	    SCTP_M_MAP);
1146 	if (asoc->mapping_array == NULL) {
1147 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1148 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1149 		return (ENOMEM);
1150 	}
1151 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1152 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1153 	    SCTP_M_MAP);
1154 	if (asoc->nr_mapping_array == NULL) {
1155 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1156 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1157 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1158 		return (ENOMEM);
1159 	}
1160 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1161 
1162 	/* Now the init of the other outqueues */
1163 	TAILQ_INIT(&asoc->free_chunks);
1164 	TAILQ_INIT(&asoc->control_send_queue);
1165 	TAILQ_INIT(&asoc->asconf_send_queue);
1166 	TAILQ_INIT(&asoc->send_queue);
1167 	TAILQ_INIT(&asoc->sent_queue);
1168 	TAILQ_INIT(&asoc->resetHead);
1169 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1170 	TAILQ_INIT(&asoc->asconf_queue);
1171 	/* authentication fields */
1172 	asoc->authinfo.random = NULL;
1173 	asoc->authinfo.active_keyid = 0;
1174 	asoc->authinfo.assoc_key = NULL;
1175 	asoc->authinfo.assoc_keyid = 0;
1176 	asoc->authinfo.recv_key = NULL;
1177 	asoc->authinfo.recv_keyid = 0;
1178 	LIST_INIT(&asoc->shared_keys);
1179 	asoc->marked_retrans = 0;
1180 	asoc->port = inp->sctp_ep.port;
1181 	asoc->timoinit = 0;
1182 	asoc->timodata = 0;
1183 	asoc->timosack = 0;
1184 	asoc->timoshutdown = 0;
1185 	asoc->timoheartbeat = 0;
1186 	asoc->timocookie = 0;
1187 	asoc->timoshutdownack = 0;
1188 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1189 	asoc->discontinuity_time = asoc->start_time;
1190 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1191 		asoc->abandoned_unsent[i] = 0;
1192 		asoc->abandoned_sent[i] = 0;
1193 	}
1194 	/*
1195 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1196 	 * freed later when the association is freed.
1197 	 */
1198 	return (0);
1199 }
1200 
1201 void
1202 sctp_print_mapping_array(struct sctp_association *asoc)
1203 {
1204 	unsigned int i, limit;
1205 
1206 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1207 	    asoc->mapping_array_size,
1208 	    asoc->mapping_array_base_tsn,
1209 	    asoc->cumulative_tsn,
1210 	    asoc->highest_tsn_inside_map,
1211 	    asoc->highest_tsn_inside_nr_map);
1212 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1213 		if (asoc->mapping_array[limit - 1] != 0) {
1214 			break;
1215 		}
1216 	}
1217 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1218 	for (i = 0; i < limit; i++) {
1219 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1220 	}
1221 	if (limit % 16)
1222 		SCTP_PRINTF("\n");
1223 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1224 		if (asoc->nr_mapping_array[limit - 1]) {
1225 			break;
1226 		}
1227 	}
1228 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1229 	for (i = 0; i < limit; i++) {
1230 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1231 	}
1232 	if (limit % 16)
1233 		SCTP_PRINTF("\n");
1234 }
1235 
1236 int
1237 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1238 {
1239 	/* mapping array needs to grow */
1240 	uint8_t *new_array1, *new_array2;
1241 	uint32_t new_size;
1242 
1243 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1244 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1245 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1246 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1247 		/* can't get more, forget it */
1248 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1249 		if (new_array1) {
1250 			SCTP_FREE(new_array1, SCTP_M_MAP);
1251 		}
1252 		if (new_array2) {
1253 			SCTP_FREE(new_array2, SCTP_M_MAP);
1254 		}
1255 		return (-1);
1256 	}
1257 	memset(new_array1, 0, new_size);
1258 	memset(new_array2, 0, new_size);
1259 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1260 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1261 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1262 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1263 	asoc->mapping_array = new_array1;
1264 	asoc->nr_mapping_array = new_array2;
1265 	asoc->mapping_array_size = new_size;
1266 	return (0);
1267 }
1268 
1269 
1270 static void
1271 sctp_iterator_work(struct sctp_iterator *it)
1272 {
1273 	int iteration_count = 0;
1274 	int inp_skip = 0;
1275 	int first_in = 1;
1276 	struct sctp_inpcb *tinp;
1277 
1278 	SCTP_INP_INFO_RLOCK();
1279 	SCTP_ITERATOR_LOCK();
1280 	if (it->inp) {
1281 		SCTP_INP_RLOCK(it->inp);
1282 		SCTP_INP_DECR_REF(it->inp);
1283 	}
1284 	if (it->inp == NULL) {
1285 		/* iterator is complete */
1286 done_with_iterator:
1287 		SCTP_ITERATOR_UNLOCK();
1288 		SCTP_INP_INFO_RUNLOCK();
1289 		if (it->function_atend != NULL) {
1290 			(*it->function_atend) (it->pointer, it->val);
1291 		}
1292 		SCTP_FREE(it, SCTP_M_ITER);
1293 		return;
1294 	}
1295 select_a_new_ep:
1296 	if (first_in) {
1297 		first_in = 0;
1298 	} else {
1299 		SCTP_INP_RLOCK(it->inp);
1300 	}
1301 	while (((it->pcb_flags) &&
1302 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1303 	    ((it->pcb_features) &&
1304 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1305 		/* endpoint flags or features don't match, so keep looking */
1306 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1307 			SCTP_INP_RUNLOCK(it->inp);
1308 			goto done_with_iterator;
1309 		}
1310 		tinp = it->inp;
1311 		it->inp = LIST_NEXT(it->inp, sctp_list);
1312 		SCTP_INP_RUNLOCK(tinp);
1313 		if (it->inp == NULL) {
1314 			goto done_with_iterator;
1315 		}
1316 		SCTP_INP_RLOCK(it->inp);
1317 	}
1318 	/* now go through each assoc which is in the desired state */
1319 	if (it->done_current_ep == 0) {
1320 		if (it->function_inp != NULL)
1321 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1322 		it->done_current_ep = 1;
1323 	}
1324 	if (it->stcb == NULL) {
1325 		/* run the per instance function */
1326 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1327 	}
1328 	if ((inp_skip) || it->stcb == NULL) {
1329 		if (it->function_inp_end != NULL) {
1330 			inp_skip = (*it->function_inp_end) (it->inp,
1331 			    it->pointer,
1332 			    it->val);
1333 		}
1334 		SCTP_INP_RUNLOCK(it->inp);
1335 		goto no_stcb;
1336 	}
1337 	while (it->stcb) {
1338 		SCTP_TCB_LOCK(it->stcb);
1339 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1340 			/* not in the right state... keep looking */
1341 			SCTP_TCB_UNLOCK(it->stcb);
1342 			goto next_assoc;
1343 		}
1344 		/* see if we have limited out the iterator loop */
1345 		iteration_count++;
1346 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1347 			/* Pause to let others grab the lock */
1348 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1349 			SCTP_TCB_UNLOCK(it->stcb);
1350 			SCTP_INP_INCR_REF(it->inp);
1351 			SCTP_INP_RUNLOCK(it->inp);
1352 			SCTP_ITERATOR_UNLOCK();
1353 			SCTP_INP_INFO_RUNLOCK();
1354 			SCTP_INP_INFO_RLOCK();
1355 			SCTP_ITERATOR_LOCK();
1356 			if (sctp_it_ctl.iterator_flags) {
1357 				/* We won't be staying here */
1358 				SCTP_INP_DECR_REF(it->inp);
1359 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1360 				if (sctp_it_ctl.iterator_flags &
1361 				    SCTP_ITERATOR_STOP_CUR_IT) {
1362 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1363 					goto done_with_iterator;
1364 				}
1365 				if (sctp_it_ctl.iterator_flags &
1366 				    SCTP_ITERATOR_STOP_CUR_INP) {
1367 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1368 					goto no_stcb;
1369 				}
1370 				/* If we reach here huh? */
1371 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1372 				    sctp_it_ctl.iterator_flags);
1373 				sctp_it_ctl.iterator_flags = 0;
1374 			}
1375 			SCTP_INP_RLOCK(it->inp);
1376 			SCTP_INP_DECR_REF(it->inp);
1377 			SCTP_TCB_LOCK(it->stcb);
1378 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1379 			iteration_count = 0;
1380 		}
1381 		/* run function on this one */
1382 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1383 
1384 		/*
1385 		 * we lie here, it really needs to have its own type but
1386 		 * first I must verify that this won't effect things :-0
1387 		 */
1388 		if (it->no_chunk_output == 0)
1389 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1390 
1391 		SCTP_TCB_UNLOCK(it->stcb);
1392 next_assoc:
1393 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1394 		if (it->stcb == NULL) {
1395 			/* Run last function */
1396 			if (it->function_inp_end != NULL) {
1397 				inp_skip = (*it->function_inp_end) (it->inp,
1398 				    it->pointer,
1399 				    it->val);
1400 			}
1401 		}
1402 	}
1403 	SCTP_INP_RUNLOCK(it->inp);
1404 no_stcb:
1405 	/* done with all assocs on this endpoint, move on to next endpoint */
1406 	it->done_current_ep = 0;
1407 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1408 		it->inp = NULL;
1409 	} else {
1410 		it->inp = LIST_NEXT(it->inp, sctp_list);
1411 	}
1412 	if (it->inp == NULL) {
1413 		goto done_with_iterator;
1414 	}
1415 	goto select_a_new_ep;
1416 }
1417 
1418 void
1419 sctp_iterator_worker(void)
1420 {
1421 	struct sctp_iterator *it, *nit;
1422 
1423 	/* This function is called with the WQ lock in place */
1424 
1425 	sctp_it_ctl.iterator_running = 1;
1426 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1427 		sctp_it_ctl.cur_it = it;
1428 		/* now lets work on this one */
1429 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1430 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1431 		CURVNET_SET(it->vn);
1432 		sctp_iterator_work(it);
1433 		sctp_it_ctl.cur_it = NULL;
1434 		CURVNET_RESTORE();
1435 		SCTP_IPI_ITERATOR_WQ_LOCK();
1436 		/* sa_ignore FREED_MEMORY */
1437 	}
1438 	sctp_it_ctl.iterator_running = 0;
1439 	return;
1440 }
1441 
1442 
1443 static void
1444 sctp_handle_addr_wq(void)
1445 {
1446 	/* deal with the ADDR wq from the rtsock calls */
1447 	struct sctp_laddr *wi, *nwi;
1448 	struct sctp_asconf_iterator *asc;
1449 
1450 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1451 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1452 	if (asc == NULL) {
1453 		/* Try later, no memory */
1454 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1455 		    (struct sctp_inpcb *)NULL,
1456 		    (struct sctp_tcb *)NULL,
1457 		    (struct sctp_nets *)NULL);
1458 		return;
1459 	}
1460 	LIST_INIT(&asc->list_of_work);
1461 	asc->cnt = 0;
1462 
1463 	SCTP_WQ_ADDR_LOCK();
1464 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1465 		LIST_REMOVE(wi, sctp_nxt_addr);
1466 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1467 		asc->cnt++;
1468 	}
1469 	SCTP_WQ_ADDR_UNLOCK();
1470 
1471 	if (asc->cnt == 0) {
1472 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1473 	} else {
1474 		int ret;
1475 
1476 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1477 		    sctp_asconf_iterator_stcb,
1478 		    NULL,	/* No ep end for boundall */
1479 		    SCTP_PCB_FLAGS_BOUNDALL,
1480 		    SCTP_PCB_ANY_FEATURES,
1481 		    SCTP_ASOC_ANY_STATE,
1482 		    (void *)asc, 0,
1483 		    sctp_asconf_iterator_end, NULL, 0);
1484 		if (ret) {
1485 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1486 			/*
1487 			 * Freeing if we are stopping or put back on the
1488 			 * addr_wq.
1489 			 */
1490 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1491 				sctp_asconf_iterator_end(asc, 0);
1492 			} else {
1493 				SCTP_WQ_ADDR_LOCK();
1494 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1495 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1496 				}
1497 				SCTP_WQ_ADDR_UNLOCK();
1498 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1499 			}
1500 		}
1501 	}
1502 }
1503 
1504 void
1505 sctp_timeout_handler(void *t)
1506 {
1507 	struct sctp_inpcb *inp;
1508 	struct sctp_tcb *stcb;
1509 	struct sctp_nets *net;
1510 	struct sctp_timer *tmr;
1511 	struct mbuf *op_err;
1512 
1513 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1514 	struct socket *so;
1515 
1516 #endif
1517 	int did_output;
1518 	int type;
1519 
1520 	tmr = (struct sctp_timer *)t;
1521 	inp = (struct sctp_inpcb *)tmr->ep;
1522 	stcb = (struct sctp_tcb *)tmr->tcb;
1523 	net = (struct sctp_nets *)tmr->net;
1524 	CURVNET_SET((struct vnet *)tmr->vnet);
1525 	did_output = 1;
1526 
1527 #ifdef SCTP_AUDITING_ENABLED
1528 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1529 	sctp_auditing(3, inp, stcb, net);
1530 #endif
1531 
1532 	/* sanity checks... */
1533 	if (tmr->self != (void *)tmr) {
1534 		/*
1535 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1536 		 * (void *)tmr);
1537 		 */
1538 		CURVNET_RESTORE();
1539 		return;
1540 	}
1541 	tmr->stopped_from = 0xa001;
1542 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1543 		/*
1544 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1545 		 * tmr->type);
1546 		 */
1547 		CURVNET_RESTORE();
1548 		return;
1549 	}
1550 	tmr->stopped_from = 0xa002;
1551 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1552 		CURVNET_RESTORE();
1553 		return;
1554 	}
1555 	/* if this is an iterator timeout, get the struct and clear inp */
1556 	tmr->stopped_from = 0xa003;
1557 	if (inp) {
1558 		SCTP_INP_INCR_REF(inp);
1559 		if ((inp->sctp_socket == NULL) &&
1560 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1561 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1562 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1563 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1564 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1565 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1566 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1567 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1568 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1569 		    ) {
1570 			SCTP_INP_DECR_REF(inp);
1571 			CURVNET_RESTORE();
1572 			return;
1573 		}
1574 	}
1575 	tmr->stopped_from = 0xa004;
1576 	if (stcb) {
1577 		atomic_add_int(&stcb->asoc.refcnt, 1);
1578 		if (stcb->asoc.state == 0) {
1579 			atomic_add_int(&stcb->asoc.refcnt, -1);
1580 			if (inp) {
1581 				SCTP_INP_DECR_REF(inp);
1582 			}
1583 			CURVNET_RESTORE();
1584 			return;
1585 		}
1586 	}
1587 	type = tmr->type;
1588 	tmr->stopped_from = 0xa005;
1589 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1590 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1591 		if (inp) {
1592 			SCTP_INP_DECR_REF(inp);
1593 		}
1594 		if (stcb) {
1595 			atomic_add_int(&stcb->asoc.refcnt, -1);
1596 		}
1597 		CURVNET_RESTORE();
1598 		return;
1599 	}
1600 	tmr->stopped_from = 0xa006;
1601 
1602 	if (stcb) {
1603 		SCTP_TCB_LOCK(stcb);
1604 		atomic_add_int(&stcb->asoc.refcnt, -1);
1605 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1606 		    ((stcb->asoc.state == 0) ||
1607 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1608 			SCTP_TCB_UNLOCK(stcb);
1609 			if (inp) {
1610 				SCTP_INP_DECR_REF(inp);
1611 			}
1612 			CURVNET_RESTORE();
1613 			return;
1614 		}
1615 	}
1616 	/* record in stopped what t-o occured */
1617 	tmr->stopped_from = type;
1618 
1619 	/* mark as being serviced now */
1620 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1621 		/*
1622 		 * Callout has been rescheduled.
1623 		 */
1624 		goto get_out;
1625 	}
1626 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1627 		/*
1628 		 * Not active, so no action.
1629 		 */
1630 		goto get_out;
1631 	}
1632 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1633 
1634 	/* call the handler for the appropriate timer type */
1635 	switch (type) {
1636 	case SCTP_TIMER_TYPE_ZERO_COPY:
1637 		if (inp == NULL) {
1638 			break;
1639 		}
1640 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1641 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1642 		}
1643 		break;
1644 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1645 		if (inp == NULL) {
1646 			break;
1647 		}
1648 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1649 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1650 		}
1651 		break;
1652 	case SCTP_TIMER_TYPE_ADDR_WQ:
1653 		sctp_handle_addr_wq();
1654 		break;
1655 	case SCTP_TIMER_TYPE_SEND:
1656 		if ((stcb == NULL) || (inp == NULL)) {
1657 			break;
1658 		}
1659 		SCTP_STAT_INCR(sctps_timodata);
1660 		stcb->asoc.timodata++;
1661 		stcb->asoc.num_send_timers_up--;
1662 		if (stcb->asoc.num_send_timers_up < 0) {
1663 			stcb->asoc.num_send_timers_up = 0;
1664 		}
1665 		SCTP_TCB_LOCK_ASSERT(stcb);
1666 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1667 			/* no need to unlock on tcb its gone */
1668 
1669 			goto out_decr;
1670 		}
1671 		SCTP_TCB_LOCK_ASSERT(stcb);
1672 #ifdef SCTP_AUDITING_ENABLED
1673 		sctp_auditing(4, inp, stcb, net);
1674 #endif
1675 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1676 		if ((stcb->asoc.num_send_timers_up == 0) &&
1677 		    (stcb->asoc.sent_queue_cnt > 0)) {
1678 			struct sctp_tmit_chunk *chk;
1679 
1680 			/*
1681 			 * safeguard. If there on some on the sent queue
1682 			 * somewhere but no timers running something is
1683 			 * wrong... so we start a timer on the first chunk
1684 			 * on the send queue on whatever net it is sent to.
1685 			 */
1686 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1687 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1688 			    chk->whoTo);
1689 		}
1690 		break;
1691 	case SCTP_TIMER_TYPE_INIT:
1692 		if ((stcb == NULL) || (inp == NULL)) {
1693 			break;
1694 		}
1695 		SCTP_STAT_INCR(sctps_timoinit);
1696 		stcb->asoc.timoinit++;
1697 		if (sctp_t1init_timer(inp, stcb, net)) {
1698 			/* no need to unlock on tcb its gone */
1699 			goto out_decr;
1700 		}
1701 		/* We do output but not here */
1702 		did_output = 0;
1703 		break;
1704 	case SCTP_TIMER_TYPE_RECV:
1705 		if ((stcb == NULL) || (inp == NULL)) {
1706 			break;
1707 		}
1708 		SCTP_STAT_INCR(sctps_timosack);
1709 		stcb->asoc.timosack++;
1710 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1711 #ifdef SCTP_AUDITING_ENABLED
1712 		sctp_auditing(4, inp, stcb, net);
1713 #endif
1714 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1715 		break;
1716 	case SCTP_TIMER_TYPE_SHUTDOWN:
1717 		if ((stcb == NULL) || (inp == NULL)) {
1718 			break;
1719 		}
1720 		if (sctp_shutdown_timer(inp, stcb, net)) {
1721 			/* no need to unlock on tcb its gone */
1722 			goto out_decr;
1723 		}
1724 		SCTP_STAT_INCR(sctps_timoshutdown);
1725 		stcb->asoc.timoshutdown++;
1726 #ifdef SCTP_AUDITING_ENABLED
1727 		sctp_auditing(4, inp, stcb, net);
1728 #endif
1729 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1730 		break;
1731 	case SCTP_TIMER_TYPE_HEARTBEAT:
1732 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1733 			break;
1734 		}
1735 		SCTP_STAT_INCR(sctps_timoheartbeat);
1736 		stcb->asoc.timoheartbeat++;
1737 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1738 			/* no need to unlock on tcb its gone */
1739 			goto out_decr;
1740 		}
1741 #ifdef SCTP_AUDITING_ENABLED
1742 		sctp_auditing(4, inp, stcb, net);
1743 #endif
1744 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1745 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1746 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1747 		}
1748 		break;
1749 	case SCTP_TIMER_TYPE_COOKIE:
1750 		if ((stcb == NULL) || (inp == NULL)) {
1751 			break;
1752 		}
1753 		if (sctp_cookie_timer(inp, stcb, net)) {
1754 			/* no need to unlock on tcb its gone */
1755 			goto out_decr;
1756 		}
1757 		SCTP_STAT_INCR(sctps_timocookie);
1758 		stcb->asoc.timocookie++;
1759 #ifdef SCTP_AUDITING_ENABLED
1760 		sctp_auditing(4, inp, stcb, net);
1761 #endif
1762 		/*
1763 		 * We consider T3 and Cookie timer pretty much the same with
1764 		 * respect to where from in chunk_output.
1765 		 */
1766 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1767 		break;
1768 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1769 		{
1770 			struct timeval tv;
1771 			int i, secret;
1772 
1773 			if (inp == NULL) {
1774 				break;
1775 			}
1776 			SCTP_STAT_INCR(sctps_timosecret);
1777 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1778 			SCTP_INP_WLOCK(inp);
1779 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1780 			inp->sctp_ep.last_secret_number =
1781 			    inp->sctp_ep.current_secret_number;
1782 			inp->sctp_ep.current_secret_number++;
1783 			if (inp->sctp_ep.current_secret_number >=
1784 			    SCTP_HOW_MANY_SECRETS) {
1785 				inp->sctp_ep.current_secret_number = 0;
1786 			}
1787 			secret = (int)inp->sctp_ep.current_secret_number;
1788 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1789 				inp->sctp_ep.secret_key[secret][i] =
1790 				    sctp_select_initial_TSN(&inp->sctp_ep);
1791 			}
1792 			SCTP_INP_WUNLOCK(inp);
1793 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1794 		}
1795 		did_output = 0;
1796 		break;
1797 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1798 		if ((stcb == NULL) || (inp == NULL)) {
1799 			break;
1800 		}
1801 		SCTP_STAT_INCR(sctps_timopathmtu);
1802 		sctp_pathmtu_timer(inp, stcb, net);
1803 		did_output = 0;
1804 		break;
1805 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1806 		if ((stcb == NULL) || (inp == NULL)) {
1807 			break;
1808 		}
1809 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1810 			/* no need to unlock on tcb its gone */
1811 			goto out_decr;
1812 		}
1813 		SCTP_STAT_INCR(sctps_timoshutdownack);
1814 		stcb->asoc.timoshutdownack++;
1815 #ifdef SCTP_AUDITING_ENABLED
1816 		sctp_auditing(4, inp, stcb, net);
1817 #endif
1818 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1819 		break;
1820 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1821 		if ((stcb == NULL) || (inp == NULL)) {
1822 			break;
1823 		}
1824 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1825 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1826 		    "Shutdown guard timer expired");
1827 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1828 		/* no need to unlock on tcb its gone */
1829 		goto out_decr;
1830 
1831 	case SCTP_TIMER_TYPE_STRRESET:
1832 		if ((stcb == NULL) || (inp == NULL)) {
1833 			break;
1834 		}
1835 		if (sctp_strreset_timer(inp, stcb, net)) {
1836 			/* no need to unlock on tcb its gone */
1837 			goto out_decr;
1838 		}
1839 		SCTP_STAT_INCR(sctps_timostrmrst);
1840 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1841 		break;
1842 	case SCTP_TIMER_TYPE_ASCONF:
1843 		if ((stcb == NULL) || (inp == NULL)) {
1844 			break;
1845 		}
1846 		if (sctp_asconf_timer(inp, stcb, net)) {
1847 			/* no need to unlock on tcb its gone */
1848 			goto out_decr;
1849 		}
1850 		SCTP_STAT_INCR(sctps_timoasconf);
1851 #ifdef SCTP_AUDITING_ENABLED
1852 		sctp_auditing(4, inp, stcb, net);
1853 #endif
1854 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1855 		break;
1856 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1857 		if ((stcb == NULL) || (inp == NULL)) {
1858 			break;
1859 		}
1860 		sctp_delete_prim_timer(inp, stcb, net);
1861 		SCTP_STAT_INCR(sctps_timodelprim);
1862 		break;
1863 
1864 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1865 		if ((stcb == NULL) || (inp == NULL)) {
1866 			break;
1867 		}
1868 		SCTP_STAT_INCR(sctps_timoautoclose);
1869 		sctp_autoclose_timer(inp, stcb, net);
1870 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1871 		did_output = 0;
1872 		break;
1873 	case SCTP_TIMER_TYPE_ASOCKILL:
1874 		if ((stcb == NULL) || (inp == NULL)) {
1875 			break;
1876 		}
1877 		SCTP_STAT_INCR(sctps_timoassockill);
1878 		/* Can we free it yet? */
1879 		SCTP_INP_DECR_REF(inp);
1880 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1881 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1882 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1883 		so = SCTP_INP_SO(inp);
1884 		atomic_add_int(&stcb->asoc.refcnt, 1);
1885 		SCTP_TCB_UNLOCK(stcb);
1886 		SCTP_SOCKET_LOCK(so, 1);
1887 		SCTP_TCB_LOCK(stcb);
1888 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1889 #endif
1890 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1891 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1892 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1893 		SCTP_SOCKET_UNLOCK(so, 1);
1894 #endif
1895 		/*
1896 		 * free asoc, always unlocks (or destroy's) so prevent
1897 		 * duplicate unlock or unlock of a free mtx :-0
1898 		 */
1899 		stcb = NULL;
1900 		goto out_no_decr;
1901 	case SCTP_TIMER_TYPE_INPKILL:
1902 		SCTP_STAT_INCR(sctps_timoinpkill);
1903 		if (inp == NULL) {
1904 			break;
1905 		}
1906 		/*
1907 		 * special case, take away our increment since WE are the
1908 		 * killer
1909 		 */
1910 		SCTP_INP_DECR_REF(inp);
1911 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1912 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1913 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1914 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1915 		inp = NULL;
1916 		goto out_no_decr;
1917 	default:
1918 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1919 		    type);
1920 		break;
1921 	}
1922 #ifdef SCTP_AUDITING_ENABLED
1923 	sctp_audit_log(0xF1, (uint8_t) type);
1924 	if (inp)
1925 		sctp_auditing(5, inp, stcb, net);
1926 #endif
1927 	if ((did_output) && stcb) {
1928 		/*
1929 		 * Now we need to clean up the control chunk chain if an
1930 		 * ECNE is on it. It must be marked as UNSENT again so next
1931 		 * call will continue to send it until such time that we get
1932 		 * a CWR, to remove it. It is, however, less likely that we
1933 		 * will find a ecn echo on the chain though.
1934 		 */
1935 		sctp_fix_ecn_echo(&stcb->asoc);
1936 	}
1937 get_out:
1938 	if (stcb) {
1939 		SCTP_TCB_UNLOCK(stcb);
1940 	}
1941 out_decr:
1942 	if (inp) {
1943 		SCTP_INP_DECR_REF(inp);
1944 	}
1945 out_no_decr:
1946 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
1947 	CURVNET_RESTORE();
1948 }
1949 
1950 void
1951 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1952     struct sctp_nets *net)
1953 {
1954 	uint32_t to_ticks;
1955 	struct sctp_timer *tmr;
1956 
1957 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1958 		return;
1959 
1960 	tmr = NULL;
1961 	if (stcb) {
1962 		SCTP_TCB_LOCK_ASSERT(stcb);
1963 	}
1964 	switch (t_type) {
1965 	case SCTP_TIMER_TYPE_ZERO_COPY:
1966 		tmr = &inp->sctp_ep.zero_copy_timer;
1967 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1968 		break;
1969 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1970 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1971 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1972 		break;
1973 	case SCTP_TIMER_TYPE_ADDR_WQ:
1974 		/* Only 1 tick away :-) */
1975 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1976 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1977 		break;
1978 	case SCTP_TIMER_TYPE_SEND:
1979 		/* Here we use the RTO timer */
1980 		{
1981 			int rto_val;
1982 
1983 			if ((stcb == NULL) || (net == NULL)) {
1984 				return;
1985 			}
1986 			tmr = &net->rxt_timer;
1987 			if (net->RTO == 0) {
1988 				rto_val = stcb->asoc.initial_rto;
1989 			} else {
1990 				rto_val = net->RTO;
1991 			}
1992 			to_ticks = MSEC_TO_TICKS(rto_val);
1993 		}
1994 		break;
1995 	case SCTP_TIMER_TYPE_INIT:
1996 		/*
1997 		 * Here we use the INIT timer default usually about 1
1998 		 * minute.
1999 		 */
2000 		if ((stcb == NULL) || (net == NULL)) {
2001 			return;
2002 		}
2003 		tmr = &net->rxt_timer;
2004 		if (net->RTO == 0) {
2005 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2006 		} else {
2007 			to_ticks = MSEC_TO_TICKS(net->RTO);
2008 		}
2009 		break;
2010 	case SCTP_TIMER_TYPE_RECV:
2011 		/*
2012 		 * Here we use the Delayed-Ack timer value from the inp
2013 		 * ususually about 200ms.
2014 		 */
2015 		if (stcb == NULL) {
2016 			return;
2017 		}
2018 		tmr = &stcb->asoc.dack_timer;
2019 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2020 		break;
2021 	case SCTP_TIMER_TYPE_SHUTDOWN:
2022 		/* Here we use the RTO of the destination. */
2023 		if ((stcb == NULL) || (net == NULL)) {
2024 			return;
2025 		}
2026 		if (net->RTO == 0) {
2027 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2028 		} else {
2029 			to_ticks = MSEC_TO_TICKS(net->RTO);
2030 		}
2031 		tmr = &net->rxt_timer;
2032 		break;
2033 	case SCTP_TIMER_TYPE_HEARTBEAT:
2034 		/*
2035 		 * the net is used here so that we can add in the RTO. Even
2036 		 * though we use a different timer. We also add the HB timer
2037 		 * PLUS a random jitter.
2038 		 */
2039 		if ((stcb == NULL) || (net == NULL)) {
2040 			return;
2041 		} else {
2042 			uint32_t rndval;
2043 			uint32_t jitter;
2044 
2045 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2046 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2047 				return;
2048 			}
2049 			if (net->RTO == 0) {
2050 				to_ticks = stcb->asoc.initial_rto;
2051 			} else {
2052 				to_ticks = net->RTO;
2053 			}
2054 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2055 			jitter = rndval % to_ticks;
2056 			if (jitter >= (to_ticks >> 1)) {
2057 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2058 			} else {
2059 				to_ticks = to_ticks - jitter;
2060 			}
2061 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2062 			    !(net->dest_state & SCTP_ADDR_PF)) {
2063 				to_ticks += net->heart_beat_delay;
2064 			}
2065 			/*
2066 			 * Now we must convert the to_ticks that are now in
2067 			 * ms to ticks.
2068 			 */
2069 			to_ticks = MSEC_TO_TICKS(to_ticks);
2070 			tmr = &net->hb_timer;
2071 		}
2072 		break;
2073 	case SCTP_TIMER_TYPE_COOKIE:
2074 		/*
2075 		 * Here we can use the RTO timer from the network since one
2076 		 * RTT was compelete. If a retran happened then we will be
2077 		 * using the RTO initial value.
2078 		 */
2079 		if ((stcb == NULL) || (net == NULL)) {
2080 			return;
2081 		}
2082 		if (net->RTO == 0) {
2083 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2084 		} else {
2085 			to_ticks = MSEC_TO_TICKS(net->RTO);
2086 		}
2087 		tmr = &net->rxt_timer;
2088 		break;
2089 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2090 		/*
2091 		 * nothing needed but the endpoint here ususually about 60
2092 		 * minutes.
2093 		 */
2094 		tmr = &inp->sctp_ep.signature_change;
2095 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2096 		break;
2097 	case SCTP_TIMER_TYPE_ASOCKILL:
2098 		if (stcb == NULL) {
2099 			return;
2100 		}
2101 		tmr = &stcb->asoc.strreset_timer;
2102 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2103 		break;
2104 	case SCTP_TIMER_TYPE_INPKILL:
2105 		/*
2106 		 * The inp is setup to die. We re-use the signature_chage
2107 		 * timer since that has stopped and we are in the GONE
2108 		 * state.
2109 		 */
2110 		tmr = &inp->sctp_ep.signature_change;
2111 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2112 		break;
2113 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2114 		/*
2115 		 * Here we use the value found in the EP for PMTU ususually
2116 		 * about 10 minutes.
2117 		 */
2118 		if ((stcb == NULL) || (net == NULL)) {
2119 			return;
2120 		}
2121 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2122 			return;
2123 		}
2124 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2125 		tmr = &net->pmtu_timer;
2126 		break;
2127 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2128 		/* Here we use the RTO of the destination */
2129 		if ((stcb == NULL) || (net == NULL)) {
2130 			return;
2131 		}
2132 		if (net->RTO == 0) {
2133 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2134 		} else {
2135 			to_ticks = MSEC_TO_TICKS(net->RTO);
2136 		}
2137 		tmr = &net->rxt_timer;
2138 		break;
2139 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2140 		/*
2141 		 * Here we use the endpoints shutdown guard timer usually
2142 		 * about 3 minutes.
2143 		 */
2144 		if (stcb == NULL) {
2145 			return;
2146 		}
2147 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2148 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2149 		} else {
2150 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2151 		}
2152 		tmr = &stcb->asoc.shut_guard_timer;
2153 		break;
2154 	case SCTP_TIMER_TYPE_STRRESET:
2155 		/*
2156 		 * Here the timer comes from the stcb but its value is from
2157 		 * the net's RTO.
2158 		 */
2159 		if ((stcb == NULL) || (net == NULL)) {
2160 			return;
2161 		}
2162 		if (net->RTO == 0) {
2163 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2164 		} else {
2165 			to_ticks = MSEC_TO_TICKS(net->RTO);
2166 		}
2167 		tmr = &stcb->asoc.strreset_timer;
2168 		break;
2169 	case SCTP_TIMER_TYPE_ASCONF:
2170 		/*
2171 		 * Here the timer comes from the stcb but its value is from
2172 		 * the net's RTO.
2173 		 */
2174 		if ((stcb == NULL) || (net == NULL)) {
2175 			return;
2176 		}
2177 		if (net->RTO == 0) {
2178 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2179 		} else {
2180 			to_ticks = MSEC_TO_TICKS(net->RTO);
2181 		}
2182 		tmr = &stcb->asoc.asconf_timer;
2183 		break;
2184 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2185 		if ((stcb == NULL) || (net != NULL)) {
2186 			return;
2187 		}
2188 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2189 		tmr = &stcb->asoc.delete_prim_timer;
2190 		break;
2191 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2192 		if (stcb == NULL) {
2193 			return;
2194 		}
2195 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2196 			/*
2197 			 * Really an error since stcb is NOT set to
2198 			 * autoclose
2199 			 */
2200 			return;
2201 		}
2202 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2203 		tmr = &stcb->asoc.autoclose_timer;
2204 		break;
2205 	default:
2206 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2207 		    __func__, t_type);
2208 		return;
2209 		break;
2210 	}
2211 	if ((to_ticks <= 0) || (tmr == NULL)) {
2212 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2213 		    __func__, t_type, to_ticks, (void *)tmr);
2214 		return;
2215 	}
2216 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2217 		/*
2218 		 * we do NOT allow you to have it already running. if it is
2219 		 * we leave the current one up unchanged
2220 		 */
2221 		return;
2222 	}
2223 	/* At this point we can proceed */
2224 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2225 		stcb->asoc.num_send_timers_up++;
2226 	}
2227 	tmr->stopped_from = 0;
2228 	tmr->type = t_type;
2229 	tmr->ep = (void *)inp;
2230 	tmr->tcb = (void *)stcb;
2231 	tmr->net = (void *)net;
2232 	tmr->self = (void *)tmr;
2233 	tmr->vnet = (void *)curvnet;
2234 	tmr->ticks = sctp_get_tick_count();
2235 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2236 	return;
2237 }
2238 
2239 void
2240 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2241     struct sctp_nets *net, uint32_t from)
2242 {
2243 	struct sctp_timer *tmr;
2244 
2245 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2246 	    (inp == NULL))
2247 		return;
2248 
2249 	tmr = NULL;
2250 	if (stcb) {
2251 		SCTP_TCB_LOCK_ASSERT(stcb);
2252 	}
2253 	switch (t_type) {
2254 	case SCTP_TIMER_TYPE_ZERO_COPY:
2255 		tmr = &inp->sctp_ep.zero_copy_timer;
2256 		break;
2257 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2258 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2259 		break;
2260 	case SCTP_TIMER_TYPE_ADDR_WQ:
2261 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2262 		break;
2263 	case SCTP_TIMER_TYPE_SEND:
2264 		if ((stcb == NULL) || (net == NULL)) {
2265 			return;
2266 		}
2267 		tmr = &net->rxt_timer;
2268 		break;
2269 	case SCTP_TIMER_TYPE_INIT:
2270 		if ((stcb == NULL) || (net == NULL)) {
2271 			return;
2272 		}
2273 		tmr = &net->rxt_timer;
2274 		break;
2275 	case SCTP_TIMER_TYPE_RECV:
2276 		if (stcb == NULL) {
2277 			return;
2278 		}
2279 		tmr = &stcb->asoc.dack_timer;
2280 		break;
2281 	case SCTP_TIMER_TYPE_SHUTDOWN:
2282 		if ((stcb == NULL) || (net == NULL)) {
2283 			return;
2284 		}
2285 		tmr = &net->rxt_timer;
2286 		break;
2287 	case SCTP_TIMER_TYPE_HEARTBEAT:
2288 		if ((stcb == NULL) || (net == NULL)) {
2289 			return;
2290 		}
2291 		tmr = &net->hb_timer;
2292 		break;
2293 	case SCTP_TIMER_TYPE_COOKIE:
2294 		if ((stcb == NULL) || (net == NULL)) {
2295 			return;
2296 		}
2297 		tmr = &net->rxt_timer;
2298 		break;
2299 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2300 		/* nothing needed but the endpoint here */
2301 		tmr = &inp->sctp_ep.signature_change;
2302 		/*
2303 		 * We re-use the newcookie timer for the INP kill timer. We
2304 		 * must assure that we do not kill it by accident.
2305 		 */
2306 		break;
2307 	case SCTP_TIMER_TYPE_ASOCKILL:
2308 		/*
2309 		 * Stop the asoc kill timer.
2310 		 */
2311 		if (stcb == NULL) {
2312 			return;
2313 		}
2314 		tmr = &stcb->asoc.strreset_timer;
2315 		break;
2316 
2317 	case SCTP_TIMER_TYPE_INPKILL:
2318 		/*
2319 		 * The inp is setup to die. We re-use the signature_chage
2320 		 * timer since that has stopped and we are in the GONE
2321 		 * state.
2322 		 */
2323 		tmr = &inp->sctp_ep.signature_change;
2324 		break;
2325 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2326 		if ((stcb == NULL) || (net == NULL)) {
2327 			return;
2328 		}
2329 		tmr = &net->pmtu_timer;
2330 		break;
2331 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2332 		if ((stcb == NULL) || (net == NULL)) {
2333 			return;
2334 		}
2335 		tmr = &net->rxt_timer;
2336 		break;
2337 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2338 		if (stcb == NULL) {
2339 			return;
2340 		}
2341 		tmr = &stcb->asoc.shut_guard_timer;
2342 		break;
2343 	case SCTP_TIMER_TYPE_STRRESET:
2344 		if (stcb == NULL) {
2345 			return;
2346 		}
2347 		tmr = &stcb->asoc.strreset_timer;
2348 		break;
2349 	case SCTP_TIMER_TYPE_ASCONF:
2350 		if (stcb == NULL) {
2351 			return;
2352 		}
2353 		tmr = &stcb->asoc.asconf_timer;
2354 		break;
2355 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2356 		if (stcb == NULL) {
2357 			return;
2358 		}
2359 		tmr = &stcb->asoc.delete_prim_timer;
2360 		break;
2361 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2362 		if (stcb == NULL) {
2363 			return;
2364 		}
2365 		tmr = &stcb->asoc.autoclose_timer;
2366 		break;
2367 	default:
2368 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2369 		    __func__, t_type);
2370 		break;
2371 	}
2372 	if (tmr == NULL) {
2373 		return;
2374 	}
2375 	if ((tmr->type != t_type) && tmr->type) {
2376 		/*
2377 		 * Ok we have a timer that is under joint use. Cookie timer
2378 		 * per chance with the SEND timer. We therefore are NOT
2379 		 * running the timer that the caller wants stopped.  So just
2380 		 * return.
2381 		 */
2382 		return;
2383 	}
2384 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2385 		stcb->asoc.num_send_timers_up--;
2386 		if (stcb->asoc.num_send_timers_up < 0) {
2387 			stcb->asoc.num_send_timers_up = 0;
2388 		}
2389 	}
2390 	tmr->self = NULL;
2391 	tmr->stopped_from = from;
2392 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2393 	return;
2394 }
2395 
2396 uint32_t
2397 sctp_calculate_len(struct mbuf *m)
2398 {
2399 	uint32_t tlen = 0;
2400 	struct mbuf *at;
2401 
2402 	at = m;
2403 	while (at) {
2404 		tlen += SCTP_BUF_LEN(at);
2405 		at = SCTP_BUF_NEXT(at);
2406 	}
2407 	return (tlen);
2408 }
2409 
2410 void
2411 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2412     struct sctp_association *asoc, uint32_t mtu)
2413 {
2414 	/*
2415 	 * Reset the P-MTU size on this association, this involves changing
2416 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2417 	 * allow the DF flag to be cleared.
2418 	 */
2419 	struct sctp_tmit_chunk *chk;
2420 	unsigned int eff_mtu, ovh;
2421 
2422 	asoc->smallest_mtu = mtu;
2423 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2424 		ovh = SCTP_MIN_OVERHEAD;
2425 	} else {
2426 		ovh = SCTP_MIN_V4_OVERHEAD;
2427 	}
2428 	eff_mtu = mtu - ovh;
2429 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2430 		if (chk->send_size > eff_mtu) {
2431 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2432 		}
2433 	}
2434 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2435 		if (chk->send_size > eff_mtu) {
2436 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2437 		}
2438 	}
2439 }
2440 
2441 
2442 /*
2443  * given an association and starting time of the current RTT period return
2444  * RTO in number of msecs net should point to the current network
2445  */
2446 
2447 uint32_t
2448 sctp_calculate_rto(struct sctp_tcb *stcb,
2449     struct sctp_association *asoc,
2450     struct sctp_nets *net,
2451     struct timeval *told,
2452     int safe, int rtt_from_sack)
2453 {
2454 	/*-
2455 	 * given an association and the starting time of the current RTT
2456 	 * period (in value1/value2) return RTO in number of msecs.
2457 	 */
2458 	int32_t rtt;		/* RTT in ms */
2459 	uint32_t new_rto;
2460 	int first_measure = 0;
2461 	struct timeval now, then, *old;
2462 
2463 	/* Copy it out for sparc64 */
2464 	if (safe == sctp_align_unsafe_makecopy) {
2465 		old = &then;
2466 		memcpy(&then, told, sizeof(struct timeval));
2467 	} else if (safe == sctp_align_safe_nocopy) {
2468 		old = told;
2469 	} else {
2470 		/* error */
2471 		SCTP_PRINTF("Huh, bad rto calc call\n");
2472 		return (0);
2473 	}
2474 	/************************/
2475 	/* 1. calculate new RTT */
2476 	/************************/
2477 	/* get the current time */
2478 	if (stcb->asoc.use_precise_time) {
2479 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2480 	} else {
2481 		(void)SCTP_GETTIME_TIMEVAL(&now);
2482 	}
2483 	timevalsub(&now, old);
2484 	/* store the current RTT in us */
2485 	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
2486 	        (uint64_t) now.tv_usec;
2487 
2488 	/* compute rtt in ms */
2489 	rtt = (int32_t) (net->rtt / 1000);
2490 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2491 		/*
2492 		 * Tell the CC module that a new update has just occurred
2493 		 * from a sack
2494 		 */
2495 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2496 	}
2497 	/*
2498 	 * Do we need to determine the lan? We do this only on sacks i.e.
2499 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2500 	 */
2501 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2502 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2503 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2504 			net->lan_type = SCTP_LAN_INTERNET;
2505 		} else {
2506 			net->lan_type = SCTP_LAN_LOCAL;
2507 		}
2508 	}
2509 	/***************************/
2510 	/* 2. update RTTVAR & SRTT */
2511 	/***************************/
2512 	/*-
2513 	 * Compute the scaled average lastsa and the
2514 	 * scaled variance lastsv as described in van Jacobson
2515 	 * Paper "Congestion Avoidance and Control", Annex A.
2516 	 *
2517 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2518 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2519 	 */
2520 	if (net->RTO_measured) {
2521 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2522 		net->lastsa += rtt;
2523 		if (rtt < 0) {
2524 			rtt = -rtt;
2525 		}
2526 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2527 		net->lastsv += rtt;
2528 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2529 			rto_logging(net, SCTP_LOG_RTTVAR);
2530 		}
2531 	} else {
2532 		/* First RTO measurment */
2533 		net->RTO_measured = 1;
2534 		first_measure = 1;
2535 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2536 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2537 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2538 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2539 		}
2540 	}
2541 	if (net->lastsv == 0) {
2542 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2543 	}
2544 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2545 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2546 	    (stcb->asoc.sat_network_lockout == 0)) {
2547 		stcb->asoc.sat_network = 1;
2548 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2549 		stcb->asoc.sat_network = 0;
2550 		stcb->asoc.sat_network_lockout = 1;
2551 	}
2552 	/* bound it, per C6/C7 in Section 5.3.1 */
2553 	if (new_rto < stcb->asoc.minrto) {
2554 		new_rto = stcb->asoc.minrto;
2555 	}
2556 	if (new_rto > stcb->asoc.maxrto) {
2557 		new_rto = stcb->asoc.maxrto;
2558 	}
2559 	/* we are now returning the RTO */
2560 	return (new_rto);
2561 }
2562 
2563 /*
2564  * return a pointer to a contiguous piece of data from the given mbuf chain
2565  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2566  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2567  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2568  */
2569 caddr_t
2570 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2571 {
2572 	uint32_t count;
2573 	uint8_t *ptr;
2574 
2575 	ptr = in_ptr;
2576 	if ((off < 0) || (len <= 0))
2577 		return (NULL);
2578 
2579 	/* find the desired start location */
2580 	while ((m != NULL) && (off > 0)) {
2581 		if (off < SCTP_BUF_LEN(m))
2582 			break;
2583 		off -= SCTP_BUF_LEN(m);
2584 		m = SCTP_BUF_NEXT(m);
2585 	}
2586 	if (m == NULL)
2587 		return (NULL);
2588 
2589 	/* is the current mbuf large enough (eg. contiguous)? */
2590 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2591 		return (mtod(m, caddr_t)+off);
2592 	} else {
2593 		/* else, it spans more than one mbuf, so save a temp copy... */
2594 		while ((m != NULL) && (len > 0)) {
2595 			count = min(SCTP_BUF_LEN(m) - off, len);
2596 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2597 			len -= count;
2598 			ptr += count;
2599 			off = 0;
2600 			m = SCTP_BUF_NEXT(m);
2601 		}
2602 		if ((m == NULL) && (len > 0))
2603 			return (NULL);
2604 		else
2605 			return ((caddr_t)in_ptr);
2606 	}
2607 }
2608 
2609 
2610 
2611 struct sctp_paramhdr *
2612 sctp_get_next_param(struct mbuf *m,
2613     int offset,
2614     struct sctp_paramhdr *pull,
2615     int pull_limit)
2616 {
2617 	/* This just provides a typed signature to Peter's Pull routine */
2618 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2619 	    (uint8_t *) pull));
2620 }
2621 
2622 
2623 struct mbuf *
2624 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2625 {
2626 	struct mbuf *m_last;
2627 	caddr_t dp;
2628 
2629 	if (padlen > 3) {
2630 		return (NULL);
2631 	}
2632 	if (padlen <= M_TRAILINGSPACE(m)) {
2633 		/*
2634 		 * The easy way. We hope the majority of the time we hit
2635 		 * here :)
2636 		 */
2637 		m_last = m;
2638 	} else {
2639 		/* Hard way we must grow the mbuf chain */
2640 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2641 		if (m_last == NULL) {
2642 			return (NULL);
2643 		}
2644 		SCTP_BUF_LEN(m_last) = 0;
2645 		SCTP_BUF_NEXT(m_last) = NULL;
2646 		SCTP_BUF_NEXT(m) = m_last;
2647 	}
2648 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2649 	SCTP_BUF_LEN(m_last) += padlen;
2650 	memset(dp, 0, padlen);
2651 	return (m_last);
2652 }
2653 
2654 struct mbuf *
2655 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2656 {
2657 	/* find the last mbuf in chain and pad it */
2658 	struct mbuf *m_at;
2659 
2660 	if (last_mbuf != NULL) {
2661 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2662 	} else {
2663 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2664 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2665 				return (sctp_add_pad_tombuf(m_at, padval));
2666 			}
2667 		}
2668 	}
2669 	return (NULL);
2670 }
2671 
2672 static void
2673 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2674     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2675 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2676     SCTP_UNUSED
2677 #endif
2678 )
2679 {
2680 	struct mbuf *m_notify;
2681 	struct sctp_assoc_change *sac;
2682 	struct sctp_queued_to_read *control;
2683 	unsigned int notif_len;
2684 	uint16_t abort_len;
2685 	unsigned int i;
2686 
2687 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2688 	struct socket *so;
2689 
2690 #endif
2691 
2692 	if (stcb == NULL) {
2693 		return;
2694 	}
2695 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2696 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2697 		if (abort != NULL) {
2698 			abort_len = ntohs(abort->ch.chunk_length);
2699 		} else {
2700 			abort_len = 0;
2701 		}
2702 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2703 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2704 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2705 			notif_len += abort_len;
2706 		}
2707 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2708 		if (m_notify == NULL) {
2709 			/* Retry with smaller value. */
2710 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2711 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2712 			if (m_notify == NULL) {
2713 				goto set_error;
2714 			}
2715 		}
2716 		SCTP_BUF_NEXT(m_notify) = NULL;
2717 		sac = mtod(m_notify, struct sctp_assoc_change *);
2718 		memset(sac, 0, notif_len);
2719 		sac->sac_type = SCTP_ASSOC_CHANGE;
2720 		sac->sac_flags = 0;
2721 		sac->sac_length = sizeof(struct sctp_assoc_change);
2722 		sac->sac_state = state;
2723 		sac->sac_error = error;
2724 		/* XXX verify these stream counts */
2725 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2726 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2727 		sac->sac_assoc_id = sctp_get_associd(stcb);
2728 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2729 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2730 				i = 0;
2731 				if (stcb->asoc.prsctp_supported == 1) {
2732 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2733 				}
2734 				if (stcb->asoc.auth_supported == 1) {
2735 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2736 				}
2737 				if (stcb->asoc.asconf_supported == 1) {
2738 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2739 				}
2740 				if (stcb->asoc.idata_supported == 1) {
2741 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2742 				}
2743 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2744 				if (stcb->asoc.reconfig_supported == 1) {
2745 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2746 				}
2747 				sac->sac_length += i;
2748 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2749 				memcpy(sac->sac_info, abort, abort_len);
2750 				sac->sac_length += abort_len;
2751 			}
2752 		}
2753 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2754 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2755 		    0, 0, stcb->asoc.context, 0, 0, 0,
2756 		    m_notify);
2757 		if (control != NULL) {
2758 			control->length = SCTP_BUF_LEN(m_notify);
2759 			/* not that we need this */
2760 			control->tail_mbuf = m_notify;
2761 			control->spec_flags = M_NOTIFICATION;
2762 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2763 			    control,
2764 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2765 			    so_locked);
2766 		} else {
2767 			sctp_m_freem(m_notify);
2768 		}
2769 	}
2770 	/*
2771 	 * For 1-to-1 style sockets, we send up and error when an ABORT
2772 	 * comes in.
2773 	 */
2774 set_error:
2775 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2776 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2777 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2778 		SOCK_LOCK(stcb->sctp_socket);
2779 		if (from_peer) {
2780 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2781 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2782 				stcb->sctp_socket->so_error = ECONNREFUSED;
2783 			} else {
2784 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2785 				stcb->sctp_socket->so_error = ECONNRESET;
2786 			}
2787 		} else {
2788 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2789 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2790 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2791 				stcb->sctp_socket->so_error = ETIMEDOUT;
2792 			} else {
2793 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2794 				stcb->sctp_socket->so_error = ECONNABORTED;
2795 			}
2796 		}
2797 	}
2798 	/* Wake ANY sleepers */
2799 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2800 	so = SCTP_INP_SO(stcb->sctp_ep);
2801 	if (!so_locked) {
2802 		atomic_add_int(&stcb->asoc.refcnt, 1);
2803 		SCTP_TCB_UNLOCK(stcb);
2804 		SCTP_SOCKET_LOCK(so, 1);
2805 		SCTP_TCB_LOCK(stcb);
2806 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2807 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2808 			SCTP_SOCKET_UNLOCK(so, 1);
2809 			return;
2810 		}
2811 	}
2812 #endif
2813 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2814 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2815 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2816 		socantrcvmore_locked(stcb->sctp_socket);
2817 	}
2818 	sorwakeup(stcb->sctp_socket);
2819 	sowwakeup(stcb->sctp_socket);
2820 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2821 	if (!so_locked) {
2822 		SCTP_SOCKET_UNLOCK(so, 1);
2823 	}
2824 #endif
2825 }
2826 
2827 static void
2828 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2829     struct sockaddr *sa, uint32_t error, int so_locked
2830 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2831     SCTP_UNUSED
2832 #endif
2833 )
2834 {
2835 	struct mbuf *m_notify;
2836 	struct sctp_paddr_change *spc;
2837 	struct sctp_queued_to_read *control;
2838 
2839 	if ((stcb == NULL) ||
2840 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2841 		/* event not enabled */
2842 		return;
2843 	}
2844 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2845 	if (m_notify == NULL)
2846 		return;
2847 	SCTP_BUF_LEN(m_notify) = 0;
2848 	spc = mtod(m_notify, struct sctp_paddr_change *);
2849 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2850 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2851 	spc->spc_flags = 0;
2852 	spc->spc_length = sizeof(struct sctp_paddr_change);
2853 	switch (sa->sa_family) {
2854 #ifdef INET
2855 	case AF_INET:
2856 #ifdef INET6
2857 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2858 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2859 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2860 		} else {
2861 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2862 		}
2863 #else
2864 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2865 #endif
2866 		break;
2867 #endif
2868 #ifdef INET6
2869 	case AF_INET6:
2870 		{
2871 			struct sockaddr_in6 *sin6;
2872 
2873 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2874 
2875 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2876 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2877 				if (sin6->sin6_scope_id == 0) {
2878 					/* recover scope_id for user */
2879 					(void)sa6_recoverscope(sin6);
2880 				} else {
2881 					/* clear embedded scope_id for user */
2882 					in6_clearscope(&sin6->sin6_addr);
2883 				}
2884 			}
2885 			break;
2886 		}
2887 #endif
2888 	default:
2889 		/* TSNH */
2890 		break;
2891 	}
2892 	spc->spc_state = state;
2893 	spc->spc_error = error;
2894 	spc->spc_assoc_id = sctp_get_associd(stcb);
2895 
2896 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2897 	SCTP_BUF_NEXT(m_notify) = NULL;
2898 
2899 	/* append to socket */
2900 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2901 	    0, 0, stcb->asoc.context, 0, 0, 0,
2902 	    m_notify);
2903 	if (control == NULL) {
2904 		/* no memory */
2905 		sctp_m_freem(m_notify);
2906 		return;
2907 	}
2908 	control->length = SCTP_BUF_LEN(m_notify);
2909 	control->spec_flags = M_NOTIFICATION;
2910 	/* not that we need this */
2911 	control->tail_mbuf = m_notify;
2912 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2913 	    control,
2914 	    &stcb->sctp_socket->so_rcv, 1,
2915 	    SCTP_READ_LOCK_NOT_HELD,
2916 	    so_locked);
2917 }
2918 
2919 
2920 static void
2921 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2922     struct sctp_tmit_chunk *chk, int so_locked
2923 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2924     SCTP_UNUSED
2925 #endif
2926 )
2927 {
2928 	struct mbuf *m_notify;
2929 	struct sctp_send_failed *ssf;
2930 	struct sctp_send_failed_event *ssfe;
2931 	struct sctp_queued_to_read *control;
2932 	int length;
2933 
2934 	if ((stcb == NULL) ||
2935 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2936 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2937 		/* event not enabled */
2938 		return;
2939 	}
2940 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2941 		length = sizeof(struct sctp_send_failed_event);
2942 	} else {
2943 		length = sizeof(struct sctp_send_failed);
2944 	}
2945 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
2946 	if (m_notify == NULL)
2947 		/* no space left */
2948 		return;
2949 	SCTP_BUF_LEN(m_notify) = 0;
2950 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2951 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2952 		memset(ssfe, 0, length);
2953 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2954 		if (sent) {
2955 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2956 		} else {
2957 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2958 		}
2959 		length += chk->send_size;
2960 		length -= sizeof(struct sctp_data_chunk);
2961 		ssfe->ssfe_length = length;
2962 		ssfe->ssfe_error = error;
2963 		/* not exactly what the user sent in, but should be close :) */
2964 		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
2965 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2966 		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
2967 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2968 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2969 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2970 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
2971 	} else {
2972 		ssf = mtod(m_notify, struct sctp_send_failed *);
2973 		memset(ssf, 0, length);
2974 		ssf->ssf_type = SCTP_SEND_FAILED;
2975 		if (sent) {
2976 			ssf->ssf_flags = SCTP_DATA_SENT;
2977 		} else {
2978 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2979 		}
2980 		length += chk->send_size;
2981 		length -= sizeof(struct sctp_data_chunk);
2982 		ssf->ssf_length = length;
2983 		ssf->ssf_error = error;
2984 		/* not exactly what the user sent in, but should be close :) */
2985 		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
2986 		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2987 		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2988 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2989 		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2990 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
2991 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2992 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
2993 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2994 	}
2995 	if (chk->data) {
2996 		/*
2997 		 * trim off the sctp chunk header(it should be there)
2998 		 */
2999 		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3000 			m_adj(chk->data, sizeof(struct sctp_data_chunk));
3001 			sctp_mbuf_crush(chk->data);
3002 			chk->send_size -= sizeof(struct sctp_data_chunk);
3003 		}
3004 	}
3005 	SCTP_BUF_NEXT(m_notify) = chk->data;
3006 	/* Steal off the mbuf */
3007 	chk->data = NULL;
3008 	/*
3009 	 * For this case, we check the actual socket buffer, since the assoc
3010 	 * is going away we don't want to overfill the socket buffer for a
3011 	 * non-reader
3012 	 */
3013 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3014 		sctp_m_freem(m_notify);
3015 		return;
3016 	}
3017 	/* append to socket */
3018 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3019 	    0, 0, stcb->asoc.context, 0, 0, 0,
3020 	    m_notify);
3021 	if (control == NULL) {
3022 		/* no memory */
3023 		sctp_m_freem(m_notify);
3024 		return;
3025 	}
3026 	control->spec_flags = M_NOTIFICATION;
3027 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3028 	    control,
3029 	    &stcb->sctp_socket->so_rcv, 1,
3030 	    SCTP_READ_LOCK_NOT_HELD,
3031 	    so_locked);
3032 }
3033 
3034 
3035 static void
3036 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3037     struct sctp_stream_queue_pending *sp, int so_locked
3038 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3039     SCTP_UNUSED
3040 #endif
3041 )
3042 {
3043 	struct mbuf *m_notify;
3044 	struct sctp_send_failed *ssf;
3045 	struct sctp_send_failed_event *ssfe;
3046 	struct sctp_queued_to_read *control;
3047 	int length;
3048 
3049 	if ((stcb == NULL) ||
3050 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3051 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3052 		/* event not enabled */
3053 		return;
3054 	}
3055 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3056 		length = sizeof(struct sctp_send_failed_event);
3057 	} else {
3058 		length = sizeof(struct sctp_send_failed);
3059 	}
3060 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
3061 	if (m_notify == NULL) {
3062 		/* no space left */
3063 		return;
3064 	}
3065 	SCTP_BUF_LEN(m_notify) = 0;
3066 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3067 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3068 		memset(ssfe, 0, length);
3069 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3070 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3071 		length += sp->length;
3072 		ssfe->ssfe_length = length;
3073 		ssfe->ssfe_error = error;
3074 		/* not exactly what the user sent in, but should be close :) */
3075 		ssfe->ssfe_info.snd_sid = sp->stream;
3076 		if (sp->some_taken) {
3077 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3078 		} else {
3079 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3080 		}
3081 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3082 		ssfe->ssfe_info.snd_context = sp->context;
3083 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3084 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3085 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
3086 	} else {
3087 		ssf = mtod(m_notify, struct sctp_send_failed *);
3088 		memset(ssf, 0, length);
3089 		ssf->ssf_type = SCTP_SEND_FAILED;
3090 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3091 		length += sp->length;
3092 		ssf->ssf_length = length;
3093 		ssf->ssf_error = error;
3094 		/* not exactly what the user sent in, but should be close :) */
3095 		ssf->ssf_info.sinfo_stream = sp->stream;
3096 		ssf->ssf_info.sinfo_ssn = 0;
3097 		if (sp->some_taken) {
3098 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3099 		} else {
3100 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3101 		}
3102 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3103 		ssf->ssf_info.sinfo_context = sp->context;
3104 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3105 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3106 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3107 	}
3108 	SCTP_BUF_NEXT(m_notify) = sp->data;
3109 
3110 	/* Steal off the mbuf */
3111 	sp->data = NULL;
3112 	/*
3113 	 * For this case, we check the actual socket buffer, since the assoc
3114 	 * is going away we don't want to overfill the socket buffer for a
3115 	 * non-reader
3116 	 */
3117 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3118 		sctp_m_freem(m_notify);
3119 		return;
3120 	}
3121 	/* append to socket */
3122 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3123 	    0, 0, stcb->asoc.context, 0, 0, 0,
3124 	    m_notify);
3125 	if (control == NULL) {
3126 		/* no memory */
3127 		sctp_m_freem(m_notify);
3128 		return;
3129 	}
3130 	control->spec_flags = M_NOTIFICATION;
3131 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3132 	    control,
3133 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3134 }
3135 
3136 
3137 
3138 static void
3139 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3140 {
3141 	struct mbuf *m_notify;
3142 	struct sctp_adaptation_event *sai;
3143 	struct sctp_queued_to_read *control;
3144 
3145 	if ((stcb == NULL) ||
3146 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3147 		/* event not enabled */
3148 		return;
3149 	}
3150 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3151 	if (m_notify == NULL)
3152 		/* no space left */
3153 		return;
3154 	SCTP_BUF_LEN(m_notify) = 0;
3155 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3156 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3157 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3158 	sai->sai_flags = 0;
3159 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3160 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3161 	sai->sai_assoc_id = sctp_get_associd(stcb);
3162 
3163 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3164 	SCTP_BUF_NEXT(m_notify) = NULL;
3165 
3166 	/* append to socket */
3167 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3168 	    0, 0, stcb->asoc.context, 0, 0, 0,
3169 	    m_notify);
3170 	if (control == NULL) {
3171 		/* no memory */
3172 		sctp_m_freem(m_notify);
3173 		return;
3174 	}
3175 	control->length = SCTP_BUF_LEN(m_notify);
3176 	control->spec_flags = M_NOTIFICATION;
3177 	/* not that we need this */
3178 	control->tail_mbuf = m_notify;
3179 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3180 	    control,
3181 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3182 }
3183 
3184 /* This always must be called with the read-queue LOCKED in the INP */
3185 static void
3186 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3187     uint32_t val, int so_locked
3188 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3189     SCTP_UNUSED
3190 #endif
3191 )
3192 {
3193 	struct mbuf *m_notify;
3194 	struct sctp_pdapi_event *pdapi;
3195 	struct sctp_queued_to_read *control;
3196 	struct sockbuf *sb;
3197 
3198 	if ((stcb == NULL) ||
3199 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3200 		/* event not enabled */
3201 		return;
3202 	}
3203 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3204 		return;
3205 	}
3206 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3207 	if (m_notify == NULL)
3208 		/* no space left */
3209 		return;
3210 	SCTP_BUF_LEN(m_notify) = 0;
3211 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3212 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3213 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3214 	pdapi->pdapi_flags = 0;
3215 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3216 	pdapi->pdapi_indication = error;
3217 	pdapi->pdapi_stream = (val >> 16);
3218 	pdapi->pdapi_seq = (val & 0x0000ffff);
3219 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3220 
3221 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3222 	SCTP_BUF_NEXT(m_notify) = NULL;
3223 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3224 	    0, 0, stcb->asoc.context, 0, 0, 0,
3225 	    m_notify);
3226 	if (control == NULL) {
3227 		/* no memory */
3228 		sctp_m_freem(m_notify);
3229 		return;
3230 	}
3231 	control->spec_flags = M_NOTIFICATION;
3232 	control->length = SCTP_BUF_LEN(m_notify);
3233 	/* not that we need this */
3234 	control->tail_mbuf = m_notify;
3235 	control->held_length = 0;
3236 	control->length = 0;
3237 	sb = &stcb->sctp_socket->so_rcv;
3238 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3239 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3240 	}
3241 	sctp_sballoc(stcb, sb, m_notify);
3242 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3243 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3244 	}
3245 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3246 	control->end_added = 1;
3247 	if (stcb->asoc.control_pdapi)
3248 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3249 	else {
3250 		/* we really should not see this case */
3251 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3252 	}
3253 	if (stcb->sctp_ep && stcb->sctp_socket) {
3254 		/* This should always be the case */
3255 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3256 		struct socket *so;
3257 
3258 		so = SCTP_INP_SO(stcb->sctp_ep);
3259 		if (!so_locked) {
3260 			atomic_add_int(&stcb->asoc.refcnt, 1);
3261 			SCTP_TCB_UNLOCK(stcb);
3262 			SCTP_SOCKET_LOCK(so, 1);
3263 			SCTP_TCB_LOCK(stcb);
3264 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3265 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3266 				SCTP_SOCKET_UNLOCK(so, 1);
3267 				return;
3268 			}
3269 		}
3270 #endif
3271 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3272 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3273 		if (!so_locked) {
3274 			SCTP_SOCKET_UNLOCK(so, 1);
3275 		}
3276 #endif
3277 	}
3278 }
3279 
3280 static void
3281 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3282 {
3283 	struct mbuf *m_notify;
3284 	struct sctp_shutdown_event *sse;
3285 	struct sctp_queued_to_read *control;
3286 
3287 	/*
3288 	 * For TCP model AND UDP connected sockets we will send an error up
3289 	 * when an SHUTDOWN completes
3290 	 */
3291 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3292 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3293 		/* mark socket closed for read/write and wakeup! */
3294 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3295 		struct socket *so;
3296 
3297 		so = SCTP_INP_SO(stcb->sctp_ep);
3298 		atomic_add_int(&stcb->asoc.refcnt, 1);
3299 		SCTP_TCB_UNLOCK(stcb);
3300 		SCTP_SOCKET_LOCK(so, 1);
3301 		SCTP_TCB_LOCK(stcb);
3302 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3303 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3304 			SCTP_SOCKET_UNLOCK(so, 1);
3305 			return;
3306 		}
3307 #endif
3308 		socantsendmore(stcb->sctp_socket);
3309 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3310 		SCTP_SOCKET_UNLOCK(so, 1);
3311 #endif
3312 	}
3313 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3314 		/* event not enabled */
3315 		return;
3316 	}
3317 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3318 	if (m_notify == NULL)
3319 		/* no space left */
3320 		return;
3321 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3322 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3323 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3324 	sse->sse_flags = 0;
3325 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3326 	sse->sse_assoc_id = sctp_get_associd(stcb);
3327 
3328 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3329 	SCTP_BUF_NEXT(m_notify) = NULL;
3330 
3331 	/* append to socket */
3332 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3333 	    0, 0, stcb->asoc.context, 0, 0, 0,
3334 	    m_notify);
3335 	if (control == NULL) {
3336 		/* no memory */
3337 		sctp_m_freem(m_notify);
3338 		return;
3339 	}
3340 	control->spec_flags = M_NOTIFICATION;
3341 	control->length = SCTP_BUF_LEN(m_notify);
3342 	/* not that we need this */
3343 	control->tail_mbuf = m_notify;
3344 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3345 	    control,
3346 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3347 }
3348 
3349 static void
3350 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3351     int so_locked
3352 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3353     SCTP_UNUSED
3354 #endif
3355 )
3356 {
3357 	struct mbuf *m_notify;
3358 	struct sctp_sender_dry_event *event;
3359 	struct sctp_queued_to_read *control;
3360 
3361 	if ((stcb == NULL) ||
3362 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3363 		/* event not enabled */
3364 		return;
3365 	}
3366 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3367 	if (m_notify == NULL) {
3368 		/* no space left */
3369 		return;
3370 	}
3371 	SCTP_BUF_LEN(m_notify) = 0;
3372 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3373 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3374 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3375 	event->sender_dry_flags = 0;
3376 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3377 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3378 
3379 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3380 	SCTP_BUF_NEXT(m_notify) = NULL;
3381 
3382 	/* append to socket */
3383 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3384 	    0, 0, stcb->asoc.context, 0, 0, 0,
3385 	    m_notify);
3386 	if (control == NULL) {
3387 		/* no memory */
3388 		sctp_m_freem(m_notify);
3389 		return;
3390 	}
3391 	control->length = SCTP_BUF_LEN(m_notify);
3392 	control->spec_flags = M_NOTIFICATION;
3393 	/* not that we need this */
3394 	control->tail_mbuf = m_notify;
3395 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3396 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3397 }
3398 
3399 
3400 void
3401 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3402 {
3403 	struct mbuf *m_notify;
3404 	struct sctp_queued_to_read *control;
3405 	struct sctp_stream_change_event *stradd;
3406 
3407 	if ((stcb == NULL) ||
3408 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3409 		/* event not enabled */
3410 		return;
3411 	}
3412 	if ((stcb->asoc.peer_req_out) && flag) {
3413 		/* Peer made the request, don't tell the local user */
3414 		stcb->asoc.peer_req_out = 0;
3415 		return;
3416 	}
3417 	stcb->asoc.peer_req_out = 0;
3418 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3419 	if (m_notify == NULL)
3420 		/* no space left */
3421 		return;
3422 	SCTP_BUF_LEN(m_notify) = 0;
3423 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3424 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3425 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3426 	stradd->strchange_flags = flag;
3427 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3428 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3429 	stradd->strchange_instrms = numberin;
3430 	stradd->strchange_outstrms = numberout;
3431 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3432 	SCTP_BUF_NEXT(m_notify) = NULL;
3433 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3434 		/* no space */
3435 		sctp_m_freem(m_notify);
3436 		return;
3437 	}
3438 	/* append to socket */
3439 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3440 	    0, 0, stcb->asoc.context, 0, 0, 0,
3441 	    m_notify);
3442 	if (control == NULL) {
3443 		/* no memory */
3444 		sctp_m_freem(m_notify);
3445 		return;
3446 	}
3447 	control->spec_flags = M_NOTIFICATION;
3448 	control->length = SCTP_BUF_LEN(m_notify);
3449 	/* not that we need this */
3450 	control->tail_mbuf = m_notify;
3451 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3452 	    control,
3453 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3454 }
3455 
3456 void
3457 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3458 {
3459 	struct mbuf *m_notify;
3460 	struct sctp_queued_to_read *control;
3461 	struct sctp_assoc_reset_event *strasoc;
3462 
3463 	if ((stcb == NULL) ||
3464 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3465 		/* event not enabled */
3466 		return;
3467 	}
3468 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3469 	if (m_notify == NULL)
3470 		/* no space left */
3471 		return;
3472 	SCTP_BUF_LEN(m_notify) = 0;
3473 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3474 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3475 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3476 	strasoc->assocreset_flags = flag;
3477 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3478 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3479 	strasoc->assocreset_local_tsn = sending_tsn;
3480 	strasoc->assocreset_remote_tsn = recv_tsn;
3481 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3482 	SCTP_BUF_NEXT(m_notify) = NULL;
3483 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3484 		/* no space */
3485 		sctp_m_freem(m_notify);
3486 		return;
3487 	}
3488 	/* append to socket */
3489 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3490 	    0, 0, stcb->asoc.context, 0, 0, 0,
3491 	    m_notify);
3492 	if (control == NULL) {
3493 		/* no memory */
3494 		sctp_m_freem(m_notify);
3495 		return;
3496 	}
3497 	control->spec_flags = M_NOTIFICATION;
3498 	control->length = SCTP_BUF_LEN(m_notify);
3499 	/* not that we need this */
3500 	control->tail_mbuf = m_notify;
3501 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3502 	    control,
3503 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3504 }
3505 
3506 
3507 
3508 static void
3509 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3510     int number_entries, uint16_t * list, int flag)
3511 {
3512 	struct mbuf *m_notify;
3513 	struct sctp_queued_to_read *control;
3514 	struct sctp_stream_reset_event *strreset;
3515 	int len;
3516 
3517 	if ((stcb == NULL) ||
3518 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3519 		/* event not enabled */
3520 		return;
3521 	}
3522 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3523 	if (m_notify == NULL)
3524 		/* no space left */
3525 		return;
3526 	SCTP_BUF_LEN(m_notify) = 0;
3527 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3528 	if (len > M_TRAILINGSPACE(m_notify)) {
3529 		/* never enough room */
3530 		sctp_m_freem(m_notify);
3531 		return;
3532 	}
3533 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3534 	memset(strreset, 0, len);
3535 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3536 	strreset->strreset_flags = flag;
3537 	strreset->strreset_length = len;
3538 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3539 	if (number_entries) {
3540 		int i;
3541 
3542 		for (i = 0; i < number_entries; i++) {
3543 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3544 		}
3545 	}
3546 	SCTP_BUF_LEN(m_notify) = len;
3547 	SCTP_BUF_NEXT(m_notify) = NULL;
3548 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3549 		/* no space */
3550 		sctp_m_freem(m_notify);
3551 		return;
3552 	}
3553 	/* append to socket */
3554 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3555 	    0, 0, stcb->asoc.context, 0, 0, 0,
3556 	    m_notify);
3557 	if (control == NULL) {
3558 		/* no memory */
3559 		sctp_m_freem(m_notify);
3560 		return;
3561 	}
3562 	control->spec_flags = M_NOTIFICATION;
3563 	control->length = SCTP_BUF_LEN(m_notify);
3564 	/* not that we need this */
3565 	control->tail_mbuf = m_notify;
3566 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3567 	    control,
3568 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3569 }
3570 
3571 
3572 static void
3573 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3574 {
3575 	struct mbuf *m_notify;
3576 	struct sctp_remote_error *sre;
3577 	struct sctp_queued_to_read *control;
3578 	unsigned int notif_len;
3579 	uint16_t chunk_len;
3580 
3581 	if ((stcb == NULL) ||
3582 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3583 		return;
3584 	}
3585 	if (chunk != NULL) {
3586 		chunk_len = ntohs(chunk->ch.chunk_length);
3587 	} else {
3588 		chunk_len = 0;
3589 	}
3590 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3591 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3592 	if (m_notify == NULL) {
3593 		/* Retry with smaller value. */
3594 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3595 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3596 		if (m_notify == NULL) {
3597 			return;
3598 		}
3599 	}
3600 	SCTP_BUF_NEXT(m_notify) = NULL;
3601 	sre = mtod(m_notify, struct sctp_remote_error *);
3602 	memset(sre, 0, notif_len);
3603 	sre->sre_type = SCTP_REMOTE_ERROR;
3604 	sre->sre_flags = 0;
3605 	sre->sre_length = sizeof(struct sctp_remote_error);
3606 	sre->sre_error = error;
3607 	sre->sre_assoc_id = sctp_get_associd(stcb);
3608 	if (notif_len > sizeof(struct sctp_remote_error)) {
3609 		memcpy(sre->sre_data, chunk, chunk_len);
3610 		sre->sre_length += chunk_len;
3611 	}
3612 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3613 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3614 	    0, 0, stcb->asoc.context, 0, 0, 0,
3615 	    m_notify);
3616 	if (control != NULL) {
3617 		control->length = SCTP_BUF_LEN(m_notify);
3618 		/* not that we need this */
3619 		control->tail_mbuf = m_notify;
3620 		control->spec_flags = M_NOTIFICATION;
3621 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3622 		    control,
3623 		    &stcb->sctp_socket->so_rcv, 1,
3624 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3625 	} else {
3626 		sctp_m_freem(m_notify);
3627 	}
3628 }
3629 
3630 
3631 void
3632 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3633     uint32_t error, void *data, int so_locked
3634 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3635     SCTP_UNUSED
3636 #endif
3637 )
3638 {
3639 	if ((stcb == NULL) ||
3640 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3641 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3642 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3643 		/* If the socket is gone we are out of here */
3644 		return;
3645 	}
3646 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3647 		return;
3648 	}
3649 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3650 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3651 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3652 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3653 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3654 			/* Don't report these in front states */
3655 			return;
3656 		}
3657 	}
3658 	switch (notification) {
3659 	case SCTP_NOTIFY_ASSOC_UP:
3660 		if (stcb->asoc.assoc_up_sent == 0) {
3661 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3662 			stcb->asoc.assoc_up_sent = 1;
3663 		}
3664 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3665 			sctp_notify_adaptation_layer(stcb);
3666 		}
3667 		if (stcb->asoc.auth_supported == 0) {
3668 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3669 			    NULL, so_locked);
3670 		}
3671 		break;
3672 	case SCTP_NOTIFY_ASSOC_DOWN:
3673 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3674 		break;
3675 	case SCTP_NOTIFY_INTERFACE_DOWN:
3676 		{
3677 			struct sctp_nets *net;
3678 
3679 			net = (struct sctp_nets *)data;
3680 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3681 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3682 			break;
3683 		}
3684 	case SCTP_NOTIFY_INTERFACE_UP:
3685 		{
3686 			struct sctp_nets *net;
3687 
3688 			net = (struct sctp_nets *)data;
3689 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3690 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3691 			break;
3692 		}
3693 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3694 		{
3695 			struct sctp_nets *net;
3696 
3697 			net = (struct sctp_nets *)data;
3698 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3699 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3700 			break;
3701 		}
3702 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3703 		sctp_notify_send_failed2(stcb, error,
3704 		    (struct sctp_stream_queue_pending *)data, so_locked);
3705 		break;
3706 	case SCTP_NOTIFY_SENT_DG_FAIL:
3707 		sctp_notify_send_failed(stcb, 1, error,
3708 		    (struct sctp_tmit_chunk *)data, so_locked);
3709 		break;
3710 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3711 		sctp_notify_send_failed(stcb, 0, error,
3712 		    (struct sctp_tmit_chunk *)data, so_locked);
3713 		break;
3714 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3715 		{
3716 			uint32_t val;
3717 
3718 			val = *((uint32_t *) data);
3719 
3720 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3721 			break;
3722 		}
3723 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3724 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3725 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3726 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3727 		} else {
3728 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3729 		}
3730 		break;
3731 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3732 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3733 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3734 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3735 		} else {
3736 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3737 		}
3738 		break;
3739 	case SCTP_NOTIFY_ASSOC_RESTART:
3740 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3741 		if (stcb->asoc.auth_supported == 0) {
3742 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3743 			    NULL, so_locked);
3744 		}
3745 		break;
3746 	case SCTP_NOTIFY_STR_RESET_SEND:
3747 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3748 		break;
3749 	case SCTP_NOTIFY_STR_RESET_RECV:
3750 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3751 		break;
3752 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3753 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3754 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3755 		break;
3756 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3757 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3758 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3759 		break;
3760 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3761 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3762 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3763 		break;
3764 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3765 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3766 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3767 		break;
3768 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3769 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3770 		    error, so_locked);
3771 		break;
3772 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3773 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3774 		    error, so_locked);
3775 		break;
3776 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3777 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3778 		    error, so_locked);
3779 		break;
3780 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3781 		sctp_notify_shutdown_event(stcb);
3782 		break;
3783 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3784 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3785 		    (uint16_t) (uintptr_t) data,
3786 		    so_locked);
3787 		break;
3788 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3789 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3790 		    (uint16_t) (uintptr_t) data,
3791 		    so_locked);
3792 		break;
3793 	case SCTP_NOTIFY_NO_PEER_AUTH:
3794 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3795 		    (uint16_t) (uintptr_t) data,
3796 		    so_locked);
3797 		break;
3798 	case SCTP_NOTIFY_SENDER_DRY:
3799 		sctp_notify_sender_dry_event(stcb, so_locked);
3800 		break;
3801 	case SCTP_NOTIFY_REMOTE_ERROR:
3802 		sctp_notify_remote_error(stcb, error, data);
3803 		break;
3804 	default:
3805 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3806 		    __func__, notification, notification);
3807 		break;
3808 	}			/* end switch */
3809 }
3810 
3811 void
3812 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3813 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3814     SCTP_UNUSED
3815 #endif
3816 )
3817 {
3818 	struct sctp_association *asoc;
3819 	struct sctp_stream_out *outs;
3820 	struct sctp_tmit_chunk *chk, *nchk;
3821 	struct sctp_stream_queue_pending *sp, *nsp;
3822 	int i;
3823 
3824 	if (stcb == NULL) {
3825 		return;
3826 	}
3827 	asoc = &stcb->asoc;
3828 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3829 		/* already being freed */
3830 		return;
3831 	}
3832 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3833 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3834 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3835 		return;
3836 	}
3837 	/* now through all the gunk freeing chunks */
3838 	if (holds_lock == 0) {
3839 		SCTP_TCB_SEND_LOCK(stcb);
3840 	}
3841 	/* sent queue SHOULD be empty */
3842 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3843 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3844 		asoc->sent_queue_cnt--;
3845 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3846 			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3847 				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3848 #ifdef INVARIANTS
3849 			} else {
3850 				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3851 #endif
3852 			}
3853 		}
3854 		if (chk->data != NULL) {
3855 			sctp_free_bufspace(stcb, asoc, chk, 1);
3856 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3857 			    error, chk, so_locked);
3858 			if (chk->data) {
3859 				sctp_m_freem(chk->data);
3860 				chk->data = NULL;
3861 			}
3862 		}
3863 		sctp_free_a_chunk(stcb, chk, so_locked);
3864 		/* sa_ignore FREED_MEMORY */
3865 	}
3866 	/* pending send queue SHOULD be empty */
3867 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3868 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3869 		asoc->send_queue_cnt--;
3870 		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3871 			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3872 #ifdef INVARIANTS
3873 		} else {
3874 			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3875 #endif
3876 		}
3877 		if (chk->data != NULL) {
3878 			sctp_free_bufspace(stcb, asoc, chk, 1);
3879 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3880 			    error, chk, so_locked);
3881 			if (chk->data) {
3882 				sctp_m_freem(chk->data);
3883 				chk->data = NULL;
3884 			}
3885 		}
3886 		sctp_free_a_chunk(stcb, chk, so_locked);
3887 		/* sa_ignore FREED_MEMORY */
3888 	}
3889 	for (i = 0; i < asoc->streamoutcnt; i++) {
3890 		/* For each stream */
3891 		outs = &asoc->strmout[i];
3892 		/* clean up any sends there */
3893 		asoc->locked_on_sending = NULL;
3894 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3895 			asoc->stream_queue_cnt--;
3896 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3897 			sctp_free_spbufspace(stcb, asoc, sp);
3898 			if (sp->data) {
3899 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3900 				    error, (void *)sp, so_locked);
3901 				if (sp->data) {
3902 					sctp_m_freem(sp->data);
3903 					sp->data = NULL;
3904 					sp->tail_mbuf = NULL;
3905 					sp->length = 0;
3906 				}
3907 			}
3908 			if (sp->net) {
3909 				sctp_free_remote_addr(sp->net);
3910 				sp->net = NULL;
3911 			}
3912 			/* Free the chunk */
3913 			sctp_free_a_strmoq(stcb, sp, so_locked);
3914 			/* sa_ignore FREED_MEMORY */
3915 		}
3916 	}
3917 
3918 	if (holds_lock == 0) {
3919 		SCTP_TCB_SEND_UNLOCK(stcb);
3920 	}
3921 }
3922 
3923 void
3924 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3925     struct sctp_abort_chunk *abort, int so_locked
3926 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3927     SCTP_UNUSED
3928 #endif
3929 )
3930 {
3931 	if (stcb == NULL) {
3932 		return;
3933 	}
3934 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3935 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3936 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3937 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3938 	}
3939 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3940 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3941 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3942 		return;
3943 	}
3944 	/* Tell them we lost the asoc */
3945 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3946 	if (from_peer) {
3947 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3948 	} else {
3949 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3950 	}
3951 }
3952 
3953 void
3954 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3955     struct mbuf *m, int iphlen,
3956     struct sockaddr *src, struct sockaddr *dst,
3957     struct sctphdr *sh, struct mbuf *op_err,
3958     uint8_t mflowtype, uint32_t mflowid,
3959     uint32_t vrf_id, uint16_t port)
3960 {
3961 	uint32_t vtag;
3962 
3963 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3964 	struct socket *so;
3965 
3966 #endif
3967 
3968 	vtag = 0;
3969 	if (stcb != NULL) {
3970 		/* We have a TCB to abort, send notification too */
3971 		vtag = stcb->asoc.peer_vtag;
3972 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
3973 		/* get the assoc vrf id and table id */
3974 		vrf_id = stcb->asoc.vrf_id;
3975 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3976 	}
3977 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
3978 	    mflowtype, mflowid, inp->fibnum,
3979 	    vrf_id, port);
3980 	if (stcb != NULL) {
3981 		/* Ok, now lets free it */
3982 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3983 		so = SCTP_INP_SO(inp);
3984 		atomic_add_int(&stcb->asoc.refcnt, 1);
3985 		SCTP_TCB_UNLOCK(stcb);
3986 		SCTP_SOCKET_LOCK(so, 1);
3987 		SCTP_TCB_LOCK(stcb);
3988 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3989 #endif
3990 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3991 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3992 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3993 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3994 		}
3995 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
3996 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
3997 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3998 		SCTP_SOCKET_UNLOCK(so, 1);
3999 #endif
4000 	}
4001 }
4002 
4003 #ifdef SCTP_ASOCLOG_OF_TSNS
4004 void
4005 sctp_print_out_track_log(struct sctp_tcb *stcb)
4006 {
4007 #ifdef NOSIY_PRINTS
4008 	int i;
4009 
4010 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4011 	SCTP_PRINTF("IN bound TSN log-aaa\n");
4012 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4013 		SCTP_PRINTF("None rcvd\n");
4014 		goto none_in;
4015 	}
4016 	if (stcb->asoc.tsn_in_wrapped) {
4017 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4018 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4019 			    stcb->asoc.in_tsnlog[i].tsn,
4020 			    stcb->asoc.in_tsnlog[i].strm,
4021 			    stcb->asoc.in_tsnlog[i].seq,
4022 			    stcb->asoc.in_tsnlog[i].flgs,
4023 			    stcb->asoc.in_tsnlog[i].sz);
4024 		}
4025 	}
4026 	if (stcb->asoc.tsn_in_at) {
4027 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4028 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4029 			    stcb->asoc.in_tsnlog[i].tsn,
4030 			    stcb->asoc.in_tsnlog[i].strm,
4031 			    stcb->asoc.in_tsnlog[i].seq,
4032 			    stcb->asoc.in_tsnlog[i].flgs,
4033 			    stcb->asoc.in_tsnlog[i].sz);
4034 		}
4035 	}
4036 none_in:
4037 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4038 	if ((stcb->asoc.tsn_out_at == 0) &&
4039 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4040 		SCTP_PRINTF("None sent\n");
4041 	}
4042 	if (stcb->asoc.tsn_out_wrapped) {
4043 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4044 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4045 			    stcb->asoc.out_tsnlog[i].tsn,
4046 			    stcb->asoc.out_tsnlog[i].strm,
4047 			    stcb->asoc.out_tsnlog[i].seq,
4048 			    stcb->asoc.out_tsnlog[i].flgs,
4049 			    stcb->asoc.out_tsnlog[i].sz);
4050 		}
4051 	}
4052 	if (stcb->asoc.tsn_out_at) {
4053 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4054 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4055 			    stcb->asoc.out_tsnlog[i].tsn,
4056 			    stcb->asoc.out_tsnlog[i].strm,
4057 			    stcb->asoc.out_tsnlog[i].seq,
4058 			    stcb->asoc.out_tsnlog[i].flgs,
4059 			    stcb->asoc.out_tsnlog[i].sz);
4060 		}
4061 	}
4062 #endif
4063 }
4064 
4065 #endif
4066 
4067 void
4068 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4069     struct mbuf *op_err,
4070     int so_locked
4071 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4072     SCTP_UNUSED
4073 #endif
4074 )
4075 {
4076 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4077 	struct socket *so;
4078 
4079 #endif
4080 
4081 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4082 	so = SCTP_INP_SO(inp);
4083 #endif
4084 	if (stcb == NULL) {
4085 		/* Got to have a TCB */
4086 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4087 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4088 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4089 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4090 			}
4091 		}
4092 		return;
4093 	} else {
4094 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4095 	}
4096 	/* notify the ulp */
4097 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4098 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4099 	}
4100 	/* notify the peer */
4101 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4102 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4103 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4104 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4105 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4106 	}
4107 	/* now free the asoc */
4108 #ifdef SCTP_ASOCLOG_OF_TSNS
4109 	sctp_print_out_track_log(stcb);
4110 #endif
4111 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4112 	if (!so_locked) {
4113 		atomic_add_int(&stcb->asoc.refcnt, 1);
4114 		SCTP_TCB_UNLOCK(stcb);
4115 		SCTP_SOCKET_LOCK(so, 1);
4116 		SCTP_TCB_LOCK(stcb);
4117 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4118 	}
4119 #endif
4120 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4121 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4122 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4123 	if (!so_locked) {
4124 		SCTP_SOCKET_UNLOCK(so, 1);
4125 	}
4126 #endif
4127 }
4128 
4129 void
4130 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4131     struct sockaddr *src, struct sockaddr *dst,
4132     struct sctphdr *sh, struct sctp_inpcb *inp,
4133     struct mbuf *cause,
4134     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4135     uint32_t vrf_id, uint16_t port)
4136 {
4137 	struct sctp_chunkhdr *ch, chunk_buf;
4138 	unsigned int chk_length;
4139 	int contains_init_chunk;
4140 
4141 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4142 	/* Generate a TO address for future reference */
4143 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4144 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4145 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4146 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4147 		}
4148 	}
4149 	contains_init_chunk = 0;
4150 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4151 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4152 	while (ch != NULL) {
4153 		chk_length = ntohs(ch->chunk_length);
4154 		if (chk_length < sizeof(*ch)) {
4155 			/* break to abort land */
4156 			break;
4157 		}
4158 		switch (ch->chunk_type) {
4159 		case SCTP_INIT:
4160 			contains_init_chunk = 1;
4161 			break;
4162 		case SCTP_PACKET_DROPPED:
4163 			/* we don't respond to pkt-dropped */
4164 			return;
4165 		case SCTP_ABORT_ASSOCIATION:
4166 			/* we don't respond with an ABORT to an ABORT */
4167 			return;
4168 		case SCTP_SHUTDOWN_COMPLETE:
4169 			/*
4170 			 * we ignore it since we are not waiting for it and
4171 			 * peer is gone
4172 			 */
4173 			return;
4174 		case SCTP_SHUTDOWN_ACK:
4175 			sctp_send_shutdown_complete2(src, dst, sh,
4176 			    mflowtype, mflowid, fibnum,
4177 			    vrf_id, port);
4178 			return;
4179 		default:
4180 			break;
4181 		}
4182 		offset += SCTP_SIZE32(chk_length);
4183 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4184 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4185 	}
4186 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4187 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4188 	    (contains_init_chunk == 0))) {
4189 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4190 		    mflowtype, mflowid, fibnum,
4191 		    vrf_id, port);
4192 	}
4193 }
4194 
4195 /*
4196  * check the inbound datagram to make sure there is not an abort inside it,
4197  * if there is return 1, else return 0.
4198  */
4199 int
4200 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4201 {
4202 	struct sctp_chunkhdr *ch;
4203 	struct sctp_init_chunk *init_chk, chunk_buf;
4204 	int offset;
4205 	unsigned int chk_length;
4206 
4207 	offset = iphlen + sizeof(struct sctphdr);
4208 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4209 	    (uint8_t *) & chunk_buf);
4210 	while (ch != NULL) {
4211 		chk_length = ntohs(ch->chunk_length);
4212 		if (chk_length < sizeof(*ch)) {
4213 			/* packet is probably corrupt */
4214 			break;
4215 		}
4216 		/* we seem to be ok, is it an abort? */
4217 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4218 			/* yep, tell them */
4219 			return (1);
4220 		}
4221 		if (ch->chunk_type == SCTP_INITIATION) {
4222 			/* need to update the Vtag */
4223 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4224 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4225 			if (init_chk != NULL) {
4226 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4227 			}
4228 		}
4229 		/* Nope, move to the next chunk */
4230 		offset += SCTP_SIZE32(chk_length);
4231 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4232 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4233 	}
4234 	return (0);
4235 }
4236 
4237 /*
4238  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4239  * set (i.e. it's 0) so, create this function to compare link local scopes
4240  */
4241 #ifdef INET6
4242 uint32_t
4243 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4244 {
4245 	struct sockaddr_in6 a, b;
4246 
4247 	/* save copies */
4248 	a = *addr1;
4249 	b = *addr2;
4250 
4251 	if (a.sin6_scope_id == 0)
4252 		if (sa6_recoverscope(&a)) {
4253 			/* can't get scope, so can't match */
4254 			return (0);
4255 		}
4256 	if (b.sin6_scope_id == 0)
4257 		if (sa6_recoverscope(&b)) {
4258 			/* can't get scope, so can't match */
4259 			return (0);
4260 		}
4261 	if (a.sin6_scope_id != b.sin6_scope_id)
4262 		return (0);
4263 
4264 	return (1);
4265 }
4266 
4267 /*
4268  * returns a sockaddr_in6 with embedded scope recovered and removed
4269  */
4270 struct sockaddr_in6 *
4271 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4272 {
4273 	/* check and strip embedded scope junk */
4274 	if (addr->sin6_family == AF_INET6) {
4275 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4276 			if (addr->sin6_scope_id == 0) {
4277 				*store = *addr;
4278 				if (!sa6_recoverscope(store)) {
4279 					/* use the recovered scope */
4280 					addr = store;
4281 				}
4282 			} else {
4283 				/* else, return the original "to" addr */
4284 				in6_clearscope(&addr->sin6_addr);
4285 			}
4286 		}
4287 	}
4288 	return (addr);
4289 }
4290 
4291 #endif
4292 
4293 /*
4294  * are the two addresses the same?  currently a "scopeless" check returns: 1
4295  * if same, 0 if not
4296  */
4297 int
4298 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4299 {
4300 
4301 	/* must be valid */
4302 	if (sa1 == NULL || sa2 == NULL)
4303 		return (0);
4304 
4305 	/* must be the same family */
4306 	if (sa1->sa_family != sa2->sa_family)
4307 		return (0);
4308 
4309 	switch (sa1->sa_family) {
4310 #ifdef INET6
4311 	case AF_INET6:
4312 		{
4313 			/* IPv6 addresses */
4314 			struct sockaddr_in6 *sin6_1, *sin6_2;
4315 
4316 			sin6_1 = (struct sockaddr_in6 *)sa1;
4317 			sin6_2 = (struct sockaddr_in6 *)sa2;
4318 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4319 			    sin6_2));
4320 		}
4321 #endif
4322 #ifdef INET
4323 	case AF_INET:
4324 		{
4325 			/* IPv4 addresses */
4326 			struct sockaddr_in *sin_1, *sin_2;
4327 
4328 			sin_1 = (struct sockaddr_in *)sa1;
4329 			sin_2 = (struct sockaddr_in *)sa2;
4330 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4331 		}
4332 #endif
4333 	default:
4334 		/* we don't do these... */
4335 		return (0);
4336 	}
4337 }
4338 
4339 void
4340 sctp_print_address(struct sockaddr *sa)
4341 {
4342 #ifdef INET6
4343 	char ip6buf[INET6_ADDRSTRLEN];
4344 
4345 #endif
4346 
4347 	switch (sa->sa_family) {
4348 #ifdef INET6
4349 	case AF_INET6:
4350 		{
4351 			struct sockaddr_in6 *sin6;
4352 
4353 			sin6 = (struct sockaddr_in6 *)sa;
4354 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4355 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4356 			    ntohs(sin6->sin6_port),
4357 			    sin6->sin6_scope_id);
4358 			break;
4359 		}
4360 #endif
4361 #ifdef INET
4362 	case AF_INET:
4363 		{
4364 			struct sockaddr_in *sin;
4365 			unsigned char *p;
4366 
4367 			sin = (struct sockaddr_in *)sa;
4368 			p = (unsigned char *)&sin->sin_addr;
4369 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4370 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4371 			break;
4372 		}
4373 #endif
4374 	default:
4375 		SCTP_PRINTF("?\n");
4376 		break;
4377 	}
4378 }
4379 
4380 void
4381 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4382     struct sctp_inpcb *new_inp,
4383     struct sctp_tcb *stcb,
4384     int waitflags)
4385 {
4386 	/*
4387 	 * go through our old INP and pull off any control structures that
4388 	 * belong to stcb and move then to the new inp.
4389 	 */
4390 	struct socket *old_so, *new_so;
4391 	struct sctp_queued_to_read *control, *nctl;
4392 	struct sctp_readhead tmp_queue;
4393 	struct mbuf *m;
4394 	int error = 0;
4395 
4396 	old_so = old_inp->sctp_socket;
4397 	new_so = new_inp->sctp_socket;
4398 	TAILQ_INIT(&tmp_queue);
4399 	error = sblock(&old_so->so_rcv, waitflags);
4400 	if (error) {
4401 		/*
4402 		 * Gak, can't get sblock, we have a problem. data will be
4403 		 * left stranded.. and we don't dare look at it since the
4404 		 * other thread may be reading something. Oh well, its a
4405 		 * screwed up app that does a peeloff OR a accept while
4406 		 * reading from the main socket... actually its only the
4407 		 * peeloff() case, since I think read will fail on a
4408 		 * listening socket..
4409 		 */
4410 		return;
4411 	}
4412 	/* lock the socket buffers */
4413 	SCTP_INP_READ_LOCK(old_inp);
4414 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4415 		/* Pull off all for out target stcb */
4416 		if (control->stcb == stcb) {
4417 			/* remove it we want it */
4418 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4419 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4420 			m = control->data;
4421 			while (m) {
4422 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4423 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4424 				}
4425 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4426 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4427 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4428 				}
4429 				m = SCTP_BUF_NEXT(m);
4430 			}
4431 		}
4432 	}
4433 	SCTP_INP_READ_UNLOCK(old_inp);
4434 	/* Remove the sb-lock on the old socket */
4435 
4436 	sbunlock(&old_so->so_rcv);
4437 	/* Now we move them over to the new socket buffer */
4438 	SCTP_INP_READ_LOCK(new_inp);
4439 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4440 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4441 		m = control->data;
4442 		while (m) {
4443 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4444 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4445 			}
4446 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4447 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4448 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4449 			}
4450 			m = SCTP_BUF_NEXT(m);
4451 		}
4452 	}
4453 	SCTP_INP_READ_UNLOCK(new_inp);
4454 }
4455 
4456 void
4457 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4458     struct sctp_tcb *stcb,
4459     int so_locked
4460 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4461     SCTP_UNUSED
4462 #endif
4463 )
4464 {
4465 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4466 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4467 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4468 		} else {
4469 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4470 			struct socket *so;
4471 
4472 			so = SCTP_INP_SO(inp);
4473 			if (!so_locked) {
4474 				if (stcb) {
4475 					atomic_add_int(&stcb->asoc.refcnt, 1);
4476 					SCTP_TCB_UNLOCK(stcb);
4477 				}
4478 				SCTP_SOCKET_LOCK(so, 1);
4479 				if (stcb) {
4480 					SCTP_TCB_LOCK(stcb);
4481 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4482 				}
4483 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4484 					SCTP_SOCKET_UNLOCK(so, 1);
4485 					return;
4486 				}
4487 			}
4488 #endif
4489 			sctp_sorwakeup(inp, inp->sctp_socket);
4490 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4491 			if (!so_locked) {
4492 				SCTP_SOCKET_UNLOCK(so, 1);
4493 			}
4494 #endif
4495 		}
4496 	}
4497 }
4498 
4499 void
4500 sctp_add_to_readq(struct sctp_inpcb *inp,
4501     struct sctp_tcb *stcb,
4502     struct sctp_queued_to_read *control,
4503     struct sockbuf *sb,
4504     int end,
4505     int inp_read_lock_held,
4506     int so_locked
4507 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4508     SCTP_UNUSED
4509 #endif
4510 )
4511 {
4512 	/*
4513 	 * Here we must place the control on the end of the socket read
4514 	 * queue AND increment sb_cc so that select will work properly on
4515 	 * read.
4516 	 */
4517 	struct mbuf *m, *prev = NULL;
4518 
4519 	if (inp == NULL) {
4520 		/* Gak, TSNH!! */
4521 #ifdef INVARIANTS
4522 		panic("Gak, inp NULL on add_to_readq");
4523 #endif
4524 		return;
4525 	}
4526 	if (inp_read_lock_held == 0)
4527 		SCTP_INP_READ_LOCK(inp);
4528 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4529 		sctp_free_remote_addr(control->whoFrom);
4530 		if (control->data) {
4531 			sctp_m_freem(control->data);
4532 			control->data = NULL;
4533 		}
4534 		sctp_free_a_readq(stcb, control);
4535 		if (inp_read_lock_held == 0)
4536 			SCTP_INP_READ_UNLOCK(inp);
4537 		return;
4538 	}
4539 	if (!(control->spec_flags & M_NOTIFICATION)) {
4540 		atomic_add_int(&inp->total_recvs, 1);
4541 		if (!control->do_not_ref_stcb) {
4542 			atomic_add_int(&stcb->total_recvs, 1);
4543 		}
4544 	}
4545 	m = control->data;
4546 	control->held_length = 0;
4547 	control->length = 0;
4548 	while (m) {
4549 		if (SCTP_BUF_LEN(m) == 0) {
4550 			/* Skip mbufs with NO length */
4551 			if (prev == NULL) {
4552 				/* First one */
4553 				control->data = sctp_m_free(m);
4554 				m = control->data;
4555 			} else {
4556 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4557 				m = SCTP_BUF_NEXT(prev);
4558 			}
4559 			if (m == NULL) {
4560 				control->tail_mbuf = prev;
4561 			}
4562 			continue;
4563 		}
4564 		prev = m;
4565 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4566 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4567 		}
4568 		sctp_sballoc(stcb, sb, m);
4569 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4570 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4571 		}
4572 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4573 		m = SCTP_BUF_NEXT(m);
4574 	}
4575 	if (prev != NULL) {
4576 		control->tail_mbuf = prev;
4577 	} else {
4578 		/* Everything got collapsed out?? */
4579 		sctp_free_remote_addr(control->whoFrom);
4580 		sctp_free_a_readq(stcb, control);
4581 		if (inp_read_lock_held == 0)
4582 			SCTP_INP_READ_UNLOCK(inp);
4583 		return;
4584 	}
4585 	if (end) {
4586 		control->end_added = 1;
4587 	}
4588 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4589 	control->on_read_q = 1;
4590 	if (inp_read_lock_held == 0)
4591 		SCTP_INP_READ_UNLOCK(inp);
4592 	if (inp && inp->sctp_socket) {
4593 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
4594 	}
4595 }
4596 
4597 
4598 int
4599 sctp_append_to_readq(struct sctp_inpcb *inp,
4600     struct sctp_tcb *stcb,
4601     struct sctp_queued_to_read *control,
4602     struct mbuf *m,
4603     int end,
4604     int ctls_cumack,
4605     struct sockbuf *sb)
4606 {
4607 	/*
4608 	 * A partial delivery API event is underway. OR we are appending on
4609 	 * the reassembly queue.
4610 	 *
4611 	 * If PDAPI this means we need to add m to the end of the data.
4612 	 * Increase the length in the control AND increment the sb_cc.
4613 	 * Otherwise sb is NULL and all we need to do is put it at the end
4614 	 * of the mbuf chain.
4615 	 */
4616 	int len = 0;
4617 	struct mbuf *mm, *tail = NULL, *prev = NULL;
4618 
4619 	if (inp) {
4620 		SCTP_INP_READ_LOCK(inp);
4621 	}
4622 	if (control == NULL) {
4623 get_out:
4624 		if (inp) {
4625 			SCTP_INP_READ_UNLOCK(inp);
4626 		}
4627 		return (-1);
4628 	}
4629 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
4630 		SCTP_INP_READ_UNLOCK(inp);
4631 		return (0);
4632 	}
4633 	if (control->end_added) {
4634 		/* huh this one is complete? */
4635 		goto get_out;
4636 	}
4637 	mm = m;
4638 	if (mm == NULL) {
4639 		goto get_out;
4640 	}
4641 	while (mm) {
4642 		if (SCTP_BUF_LEN(mm) == 0) {
4643 			/* Skip mbufs with NO lenght */
4644 			if (prev == NULL) {
4645 				/* First one */
4646 				m = sctp_m_free(mm);
4647 				mm = m;
4648 			} else {
4649 				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
4650 				mm = SCTP_BUF_NEXT(prev);
4651 			}
4652 			continue;
4653 		}
4654 		prev = mm;
4655 		len += SCTP_BUF_LEN(mm);
4656 		if (sb) {
4657 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4658 				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
4659 			}
4660 			sctp_sballoc(stcb, sb, mm);
4661 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4662 				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4663 			}
4664 		}
4665 		mm = SCTP_BUF_NEXT(mm);
4666 	}
4667 	if (prev) {
4668 		tail = prev;
4669 	} else {
4670 		/* Really there should always be a prev */
4671 		if (m == NULL) {
4672 			/* Huh nothing left? */
4673 #ifdef INVARIANTS
4674 			panic("Nothing left to add?");
4675 #else
4676 			goto get_out;
4677 #endif
4678 		}
4679 		tail = m;
4680 	}
4681 	if (control->tail_mbuf) {
4682 		/* append */
4683 		SCTP_BUF_NEXT(control->tail_mbuf) = m;
4684 		control->tail_mbuf = tail;
4685 	} else {
4686 		/* nothing there */
4687 #ifdef INVARIANTS
4688 		if (control->data != NULL) {
4689 			panic("This should NOT happen");
4690 		}
4691 #endif
4692 		control->data = m;
4693 		control->tail_mbuf = tail;
4694 	}
4695 	atomic_add_int(&control->length, len);
4696 	if (end) {
4697 		/* message is complete */
4698 		if (stcb && (control == stcb->asoc.control_pdapi)) {
4699 			stcb->asoc.control_pdapi = NULL;
4700 		}
4701 		control->held_length = 0;
4702 		control->end_added = 1;
4703 	}
4704 	if (stcb == NULL) {
4705 		control->do_not_ref_stcb = 1;
4706 	}
4707 	/*
4708 	 * When we are appending in partial delivery, the cum-ack is used
4709 	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
4710 	 * is populated in the outbound sinfo structure from the true cumack
4711 	 * if the association exists...
4712 	 */
4713 	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
4714 	if (inp) {
4715 		SCTP_INP_READ_UNLOCK(inp);
4716 	}
4717 	if (inp && inp->sctp_socket) {
4718 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4719 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4720 		} else {
4721 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4722 			struct socket *so;
4723 
4724 			so = SCTP_INP_SO(inp);
4725 			if (stcb) {
4726 				atomic_add_int(&stcb->asoc.refcnt, 1);
4727 				SCTP_TCB_UNLOCK(stcb);
4728 			}
4729 			SCTP_SOCKET_LOCK(so, 1);
4730 			if (stcb) {
4731 				SCTP_TCB_LOCK(stcb);
4732 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4733 			}
4734 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4735 				SCTP_SOCKET_UNLOCK(so, 1);
4736 				return (0);
4737 			}
4738 #endif
4739 			sctp_sorwakeup(inp, inp->sctp_socket);
4740 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4741 			SCTP_SOCKET_UNLOCK(so, 1);
4742 #endif
4743 		}
4744 	}
4745 	return (0);
4746 }
4747 
4748 
4749 
4750 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4751  *************ALTERNATE ROUTING CODE
4752  */
4753 
4754 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4755  *************ALTERNATE ROUTING CODE
4756  */
4757 
4758 struct mbuf *
4759 sctp_generate_cause(uint16_t code, char *info)
4760 {
4761 	struct mbuf *m;
4762 	struct sctp_gen_error_cause *cause;
4763 	size_t info_len;
4764 	uint16_t len;
4765 
4766 	if ((code == 0) || (info == NULL)) {
4767 		return (NULL);
4768 	}
4769 	info_len = strlen(info);
4770 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4771 		return (NULL);
4772 	}
4773 	len = (uint16_t) (sizeof(struct sctp_paramhdr) + info_len);
4774 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4775 	if (m != NULL) {
4776 		SCTP_BUF_LEN(m) = len;
4777 		cause = mtod(m, struct sctp_gen_error_cause *);
4778 		cause->code = htons(code);
4779 		cause->length = htons(len);
4780 		memcpy(cause->info, info, info_len);
4781 	}
4782 	return (m);
4783 }
4784 
4785 struct mbuf *
4786 sctp_generate_no_user_data_cause(uint32_t tsn)
4787 {
4788 	struct mbuf *m;
4789 	struct sctp_error_no_user_data *no_user_data_cause;
4790 	uint16_t len;
4791 
4792 	len = (uint16_t) sizeof(struct sctp_error_no_user_data);
4793 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4794 	if (m != NULL) {
4795 		SCTP_BUF_LEN(m) = len;
4796 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4797 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4798 		no_user_data_cause->cause.length = htons(len);
4799 		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4800 	}
4801 	return (m);
4802 }
4803 
4804 #ifdef SCTP_MBCNT_LOGGING
4805 void
4806 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4807     struct sctp_tmit_chunk *tp1, int chk_cnt)
4808 {
4809 	if (tp1->data == NULL) {
4810 		return;
4811 	}
4812 	asoc->chunks_on_out_queue -= chk_cnt;
4813 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4814 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4815 		    asoc->total_output_queue_size,
4816 		    tp1->book_size,
4817 		    0,
4818 		    tp1->mbcnt);
4819 	}
4820 	if (asoc->total_output_queue_size >= tp1->book_size) {
4821 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4822 	} else {
4823 		asoc->total_output_queue_size = 0;
4824 	}
4825 
4826 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4827 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4828 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4829 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4830 		} else {
4831 			stcb->sctp_socket->so_snd.sb_cc = 0;
4832 
4833 		}
4834 	}
4835 }
4836 
4837 #endif
4838 
4839 int
4840 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4841     uint8_t sent, int so_locked
4842 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4843     SCTP_UNUSED
4844 #endif
4845 )
4846 {
4847 	struct sctp_stream_out *strq;
4848 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4849 	struct sctp_stream_queue_pending *sp;
4850 	uint16_t stream = 0, seq = 0;
4851 	uint8_t foundeom = 0;
4852 	int ret_sz = 0;
4853 	int notdone;
4854 	int do_wakeup_routine = 0;
4855 
4856 	stream = tp1->rec.data.stream_number;
4857 	seq = tp1->rec.data.stream_seq;
4858 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4859 		stcb->asoc.abandoned_sent[0]++;
4860 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4861 		stcb->asoc.strmout[stream].abandoned_sent[0]++;
4862 #if defined(SCTP_DETAILED_STR_STATS)
4863 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4864 #endif
4865 	} else {
4866 		stcb->asoc.abandoned_unsent[0]++;
4867 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4868 		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
4869 #if defined(SCTP_DETAILED_STR_STATS)
4870 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4871 #endif
4872 	}
4873 	do {
4874 		ret_sz += tp1->book_size;
4875 		if (tp1->data != NULL) {
4876 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4877 				sctp_flight_size_decrease(tp1);
4878 				sctp_total_flight_decrease(stcb, tp1);
4879 			}
4880 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4881 			stcb->asoc.peers_rwnd += tp1->send_size;
4882 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4883 			if (sent) {
4884 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4885 			} else {
4886 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4887 			}
4888 			if (tp1->data) {
4889 				sctp_m_freem(tp1->data);
4890 				tp1->data = NULL;
4891 			}
4892 			do_wakeup_routine = 1;
4893 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4894 				stcb->asoc.sent_queue_cnt_removeable--;
4895 			}
4896 		}
4897 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4898 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4899 		    SCTP_DATA_NOT_FRAG) {
4900 			/* not frag'ed we ae done   */
4901 			notdone = 0;
4902 			foundeom = 1;
4903 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4904 			/* end of frag, we are done */
4905 			notdone = 0;
4906 			foundeom = 1;
4907 		} else {
4908 			/*
4909 			 * Its a begin or middle piece, we must mark all of
4910 			 * it
4911 			 */
4912 			notdone = 1;
4913 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4914 		}
4915 	} while (tp1 && notdone);
4916 	if (foundeom == 0) {
4917 		/*
4918 		 * The multi-part message was scattered across the send and
4919 		 * sent queue.
4920 		 */
4921 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4922 			if ((tp1->rec.data.stream_number != stream) ||
4923 			    (tp1->rec.data.stream_seq != seq)) {
4924 				break;
4925 			}
4926 			/*
4927 			 * save to chk in case we have some on stream out
4928 			 * queue. If so and we have an un-transmitted one we
4929 			 * don't have to fudge the TSN.
4930 			 */
4931 			chk = tp1;
4932 			ret_sz += tp1->book_size;
4933 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4934 			if (sent) {
4935 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4936 			} else {
4937 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4938 			}
4939 			if (tp1->data) {
4940 				sctp_m_freem(tp1->data);
4941 				tp1->data = NULL;
4942 			}
4943 			/* No flight involved here book the size to 0 */
4944 			tp1->book_size = 0;
4945 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4946 				foundeom = 1;
4947 			}
4948 			do_wakeup_routine = 1;
4949 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4950 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4951 			/*
4952 			 * on to the sent queue so we can wait for it to be
4953 			 * passed by.
4954 			 */
4955 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4956 			    sctp_next);
4957 			stcb->asoc.send_queue_cnt--;
4958 			stcb->asoc.sent_queue_cnt++;
4959 		}
4960 	}
4961 	if (foundeom == 0) {
4962 		/*
4963 		 * Still no eom found. That means there is stuff left on the
4964 		 * stream out queue.. yuck.
4965 		 */
4966 		SCTP_TCB_SEND_LOCK(stcb);
4967 		strq = &stcb->asoc.strmout[stream];
4968 		sp = TAILQ_FIRST(&strq->outqueue);
4969 		if (sp != NULL) {
4970 			sp->discard_rest = 1;
4971 			/*
4972 			 * We may need to put a chunk on the queue that
4973 			 * holds the TSN that would have been sent with the
4974 			 * LAST bit.
4975 			 */
4976 			if (chk == NULL) {
4977 				/* Yep, we have to */
4978 				sctp_alloc_a_chunk(stcb, chk);
4979 				if (chk == NULL) {
4980 					/*
4981 					 * we are hosed. All we can do is
4982 					 * nothing.. which will cause an
4983 					 * abort if the peer is paying
4984 					 * attention.
4985 					 */
4986 					goto oh_well;
4987 				}
4988 				memset(chk, 0, sizeof(*chk));
4989 				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4990 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4991 				chk->asoc = &stcb->asoc;
4992 				chk->rec.data.stream_seq = strq->next_sequence_send;
4993 				chk->rec.data.stream_number = sp->stream;
4994 				chk->rec.data.payloadtype = sp->ppid;
4995 				chk->rec.data.context = sp->context;
4996 				chk->flags = sp->act_flags;
4997 				chk->whoTo = NULL;
4998 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4999 				strq->chunks_on_queues++;
5000 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
5001 				stcb->asoc.sent_queue_cnt++;
5002 				stcb->asoc.pr_sctp_cnt++;
5003 			} else {
5004 				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
5005 			}
5006 			strq->next_sequence_send++;
5007 	oh_well:
5008 			if (sp->data) {
5009 				/*
5010 				 * Pull any data to free up the SB and allow
5011 				 * sender to "add more" while we will throw
5012 				 * away :-)
5013 				 */
5014 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
5015 				ret_sz += sp->length;
5016 				do_wakeup_routine = 1;
5017 				sp->some_taken = 1;
5018 				sctp_m_freem(sp->data);
5019 				sp->data = NULL;
5020 				sp->tail_mbuf = NULL;
5021 				sp->length = 0;
5022 			}
5023 		}
5024 		SCTP_TCB_SEND_UNLOCK(stcb);
5025 	}
5026 	if (do_wakeup_routine) {
5027 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5028 		struct socket *so;
5029 
5030 		so = SCTP_INP_SO(stcb->sctp_ep);
5031 		if (!so_locked) {
5032 			atomic_add_int(&stcb->asoc.refcnt, 1);
5033 			SCTP_TCB_UNLOCK(stcb);
5034 			SCTP_SOCKET_LOCK(so, 1);
5035 			SCTP_TCB_LOCK(stcb);
5036 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
5037 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5038 				/* assoc was freed while we were unlocked */
5039 				SCTP_SOCKET_UNLOCK(so, 1);
5040 				return (ret_sz);
5041 			}
5042 		}
5043 #endif
5044 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
5045 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5046 		if (!so_locked) {
5047 			SCTP_SOCKET_UNLOCK(so, 1);
5048 		}
5049 #endif
5050 	}
5051 	return (ret_sz);
5052 }
5053 
5054 /*
5055  * checks to see if the given address, sa, is one that is currently known by
5056  * the kernel note: can't distinguish the same address on multiple interfaces
5057  * and doesn't handle multiple addresses with different zone/scope id's note:
5058  * ifa_ifwithaddr() compares the entire sockaddr struct
5059  */
5060 struct sctp_ifa *
5061 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5062     int holds_lock)
5063 {
5064 	struct sctp_laddr *laddr;
5065 
5066 	if (holds_lock == 0) {
5067 		SCTP_INP_RLOCK(inp);
5068 	}
5069 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5070 		if (laddr->ifa == NULL)
5071 			continue;
5072 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5073 			continue;
5074 #ifdef INET
5075 		if (addr->sa_family == AF_INET) {
5076 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5077 			    laddr->ifa->address.sin.sin_addr.s_addr) {
5078 				/* found him. */
5079 				if (holds_lock == 0) {
5080 					SCTP_INP_RUNLOCK(inp);
5081 				}
5082 				return (laddr->ifa);
5083 				break;
5084 			}
5085 		}
5086 #endif
5087 #ifdef INET6
5088 		if (addr->sa_family == AF_INET6) {
5089 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5090 			    &laddr->ifa->address.sin6)) {
5091 				/* found him. */
5092 				if (holds_lock == 0) {
5093 					SCTP_INP_RUNLOCK(inp);
5094 				}
5095 				return (laddr->ifa);
5096 				break;
5097 			}
5098 		}
5099 #endif
5100 	}
5101 	if (holds_lock == 0) {
5102 		SCTP_INP_RUNLOCK(inp);
5103 	}
5104 	return (NULL);
5105 }
5106 
5107 uint32_t
5108 sctp_get_ifa_hash_val(struct sockaddr *addr)
5109 {
5110 	switch (addr->sa_family) {
5111 #ifdef INET
5112 	case AF_INET:
5113 		{
5114 			struct sockaddr_in *sin;
5115 
5116 			sin = (struct sockaddr_in *)addr;
5117 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5118 		}
5119 #endif
5120 #ifdef INET6
5121 	case AF_INET6:
5122 		{
5123 			struct sockaddr_in6 *sin6;
5124 			uint32_t hash_of_addr;
5125 
5126 			sin6 = (struct sockaddr_in6 *)addr;
5127 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5128 			    sin6->sin6_addr.s6_addr32[1] +
5129 			    sin6->sin6_addr.s6_addr32[2] +
5130 			    sin6->sin6_addr.s6_addr32[3]);
5131 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5132 			return (hash_of_addr);
5133 		}
5134 #endif
5135 	default:
5136 		break;
5137 	}
5138 	return (0);
5139 }
5140 
5141 struct sctp_ifa *
5142 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5143 {
5144 	struct sctp_ifa *sctp_ifap;
5145 	struct sctp_vrf *vrf;
5146 	struct sctp_ifalist *hash_head;
5147 	uint32_t hash_of_addr;
5148 
5149 	if (holds_lock == 0)
5150 		SCTP_IPI_ADDR_RLOCK();
5151 
5152 	vrf = sctp_find_vrf(vrf_id);
5153 	if (vrf == NULL) {
5154 		if (holds_lock == 0)
5155 			SCTP_IPI_ADDR_RUNLOCK();
5156 		return (NULL);
5157 	}
5158 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5159 
5160 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5161 	if (hash_head == NULL) {
5162 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5163 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5164 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5165 		sctp_print_address(addr);
5166 		SCTP_PRINTF("No such bucket for address\n");
5167 		if (holds_lock == 0)
5168 			SCTP_IPI_ADDR_RUNLOCK();
5169 
5170 		return (NULL);
5171 	}
5172 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5173 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5174 			continue;
5175 #ifdef INET
5176 		if (addr->sa_family == AF_INET) {
5177 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5178 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5179 				/* found him. */
5180 				if (holds_lock == 0)
5181 					SCTP_IPI_ADDR_RUNLOCK();
5182 				return (sctp_ifap);
5183 				break;
5184 			}
5185 		}
5186 #endif
5187 #ifdef INET6
5188 		if (addr->sa_family == AF_INET6) {
5189 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5190 			    &sctp_ifap->address.sin6)) {
5191 				/* found him. */
5192 				if (holds_lock == 0)
5193 					SCTP_IPI_ADDR_RUNLOCK();
5194 				return (sctp_ifap);
5195 				break;
5196 			}
5197 		}
5198 #endif
5199 	}
5200 	if (holds_lock == 0)
5201 		SCTP_IPI_ADDR_RUNLOCK();
5202 	return (NULL);
5203 }
5204 
5205 static void
5206 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
5207     uint32_t rwnd_req)
5208 {
5209 	/* User pulled some data, do we need a rwnd update? */
5210 	int r_unlocked = 0;
5211 	uint32_t dif, rwnd;
5212 	struct socket *so = NULL;
5213 
5214 	if (stcb == NULL)
5215 		return;
5216 
5217 	atomic_add_int(&stcb->asoc.refcnt, 1);
5218 
5219 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5220 	    SCTP_STATE_SHUTDOWN_RECEIVED |
5221 	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5222 		/* Pre-check If we are freeing no update */
5223 		goto no_lock;
5224 	}
5225 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5226 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5227 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5228 		goto out;
5229 	}
5230 	so = stcb->sctp_socket;
5231 	if (so == NULL) {
5232 		goto out;
5233 	}
5234 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5235 	/* Have you have freed enough to look */
5236 	*freed_so_far = 0;
5237 	/* Yep, its worth a look and the lock overhead */
5238 
5239 	/* Figure out what the rwnd would be */
5240 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5241 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5242 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5243 	} else {
5244 		dif = 0;
5245 	}
5246 	if (dif >= rwnd_req) {
5247 		if (hold_rlock) {
5248 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5249 			r_unlocked = 1;
5250 		}
5251 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5252 			/*
5253 			 * One last check before we allow the guy possibly
5254 			 * to get in. There is a race, where the guy has not
5255 			 * reached the gate. In that case
5256 			 */
5257 			goto out;
5258 		}
5259 		SCTP_TCB_LOCK(stcb);
5260 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5261 			/* No reports here */
5262 			SCTP_TCB_UNLOCK(stcb);
5263 			goto out;
5264 		}
5265 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5266 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5267 
5268 		sctp_chunk_output(stcb->sctp_ep, stcb,
5269 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5270 		/* make sure no timer is running */
5271 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5272 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5273 		SCTP_TCB_UNLOCK(stcb);
5274 	} else {
5275 		/* Update how much we have pending */
5276 		stcb->freed_by_sorcv_sincelast = dif;
5277 	}
5278 out:
5279 	if (so && r_unlocked && hold_rlock) {
5280 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5281 	}
5282 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5283 no_lock:
5284 	atomic_add_int(&stcb->asoc.refcnt, -1);
5285 	return;
5286 }
5287 
5288 int
5289 sctp_sorecvmsg(struct socket *so,
5290     struct uio *uio,
5291     struct mbuf **mp,
5292     struct sockaddr *from,
5293     int fromlen,
5294     int *msg_flags,
5295     struct sctp_sndrcvinfo *sinfo,
5296     int filling_sinfo)
5297 {
5298 	/*
5299 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5300 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5301 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5302 	 * On the way out we may send out any combination of:
5303 	 * MSG_NOTIFICATION MSG_EOR
5304 	 *
5305 	 */
5306 	struct sctp_inpcb *inp = NULL;
5307 	int my_len = 0;
5308 	int cp_len = 0, error = 0;
5309 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5310 	struct mbuf *m = NULL;
5311 	struct sctp_tcb *stcb = NULL;
5312 	int wakeup_read_socket = 0;
5313 	int freecnt_applied = 0;
5314 	int out_flags = 0, in_flags = 0;
5315 	int block_allowed = 1;
5316 	uint32_t freed_so_far = 0;
5317 	uint32_t copied_so_far = 0;
5318 	int in_eeor_mode = 0;
5319 	int no_rcv_needed = 0;
5320 	uint32_t rwnd_req = 0;
5321 	int hold_sblock = 0;
5322 	int hold_rlock = 0;
5323 	ssize_t slen = 0;
5324 	uint32_t held_length = 0;
5325 	int sockbuf_lock = 0;
5326 
5327 	if (uio == NULL) {
5328 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5329 		return (EINVAL);
5330 	}
5331 	if (msg_flags) {
5332 		in_flags = *msg_flags;
5333 		if (in_flags & MSG_PEEK)
5334 			SCTP_STAT_INCR(sctps_read_peeks);
5335 	} else {
5336 		in_flags = 0;
5337 	}
5338 	slen = uio->uio_resid;
5339 
5340 	/* Pull in and set up our int flags */
5341 	if (in_flags & MSG_OOB) {
5342 		/* Out of band's NOT supported */
5343 		return (EOPNOTSUPP);
5344 	}
5345 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5346 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5347 		return (EINVAL);
5348 	}
5349 	if ((in_flags & (MSG_DONTWAIT
5350 	    | MSG_NBIO
5351 	    )) ||
5352 	    SCTP_SO_IS_NBIO(so)) {
5353 		block_allowed = 0;
5354 	}
5355 	/* setup the endpoint */
5356 	inp = (struct sctp_inpcb *)so->so_pcb;
5357 	if (inp == NULL) {
5358 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5359 		return (EFAULT);
5360 	}
5361 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5362 	/* Must be at least a MTU's worth */
5363 	if (rwnd_req < SCTP_MIN_RWND)
5364 		rwnd_req = SCTP_MIN_RWND;
5365 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5366 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5367 		sctp_misc_ints(SCTP_SORECV_ENTER,
5368 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t) uio->uio_resid);
5369 	}
5370 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5371 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5372 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t) uio->uio_resid);
5373 	}
5374 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5375 	if (error) {
5376 		goto release_unlocked;
5377 	}
5378 	sockbuf_lock = 1;
5379 restart:
5380 
5381 
5382 restart_nosblocks:
5383 	if (hold_sblock == 0) {
5384 		SOCKBUF_LOCK(&so->so_rcv);
5385 		hold_sblock = 1;
5386 	}
5387 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5388 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5389 		goto out;
5390 	}
5391 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5392 		if (so->so_error) {
5393 			error = so->so_error;
5394 			if ((in_flags & MSG_PEEK) == 0)
5395 				so->so_error = 0;
5396 			goto out;
5397 		} else {
5398 			if (so->so_rcv.sb_cc == 0) {
5399 				/* indicate EOF */
5400 				error = 0;
5401 				goto out;
5402 			}
5403 		}
5404 	}
5405 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5406 		/* we need to wait for data */
5407 		if ((so->so_rcv.sb_cc == 0) &&
5408 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5409 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5410 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5411 				/*
5412 				 * For active open side clear flags for
5413 				 * re-use passive open is blocked by
5414 				 * connect.
5415 				 */
5416 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5417 					/*
5418 					 * You were aborted, passive side
5419 					 * always hits here
5420 					 */
5421 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5422 					error = ECONNRESET;
5423 				}
5424 				so->so_state &= ~(SS_ISCONNECTING |
5425 				    SS_ISDISCONNECTING |
5426 				    SS_ISCONFIRMING |
5427 				    SS_ISCONNECTED);
5428 				if (error == 0) {
5429 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5430 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5431 						error = ENOTCONN;
5432 					}
5433 				}
5434 				goto out;
5435 			}
5436 		}
5437 		error = sbwait(&so->so_rcv);
5438 		if (error) {
5439 			goto out;
5440 		}
5441 		held_length = 0;
5442 		goto restart_nosblocks;
5443 	} else if (so->so_rcv.sb_cc == 0) {
5444 		if (so->so_error) {
5445 			error = so->so_error;
5446 			if ((in_flags & MSG_PEEK) == 0)
5447 				so->so_error = 0;
5448 		} else {
5449 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5450 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5451 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5452 					/*
5453 					 * For active open side clear flags
5454 					 * for re-use passive open is
5455 					 * blocked by connect.
5456 					 */
5457 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5458 						/*
5459 						 * You were aborted, passive
5460 						 * side always hits here
5461 						 */
5462 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5463 						error = ECONNRESET;
5464 					}
5465 					so->so_state &= ~(SS_ISCONNECTING |
5466 					    SS_ISDISCONNECTING |
5467 					    SS_ISCONFIRMING |
5468 					    SS_ISCONNECTED);
5469 					if (error == 0) {
5470 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5471 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5472 							error = ENOTCONN;
5473 						}
5474 					}
5475 					goto out;
5476 				}
5477 			}
5478 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5479 			error = EWOULDBLOCK;
5480 		}
5481 		goto out;
5482 	}
5483 	if (hold_sblock == 1) {
5484 		SOCKBUF_UNLOCK(&so->so_rcv);
5485 		hold_sblock = 0;
5486 	}
5487 	/* we possibly have data we can read */
5488 	/* sa_ignore FREED_MEMORY */
5489 	control = TAILQ_FIRST(&inp->read_queue);
5490 	if (control == NULL) {
5491 		/*
5492 		 * This could be happening since the appender did the
5493 		 * increment but as not yet did the tailq insert onto the
5494 		 * read_queue
5495 		 */
5496 		if (hold_rlock == 0) {
5497 			SCTP_INP_READ_LOCK(inp);
5498 		}
5499 		control = TAILQ_FIRST(&inp->read_queue);
5500 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5501 #ifdef INVARIANTS
5502 			panic("Huh, its non zero and nothing on control?");
5503 #endif
5504 			so->so_rcv.sb_cc = 0;
5505 		}
5506 		SCTP_INP_READ_UNLOCK(inp);
5507 		hold_rlock = 0;
5508 		goto restart;
5509 	}
5510 	if ((control->length == 0) &&
5511 	    (control->do_not_ref_stcb)) {
5512 		/*
5513 		 * Clean up code for freeing assoc that left behind a
5514 		 * pdapi.. maybe a peer in EEOR that just closed after
5515 		 * sending and never indicated a EOR.
5516 		 */
5517 		if (hold_rlock == 0) {
5518 			hold_rlock = 1;
5519 			SCTP_INP_READ_LOCK(inp);
5520 		}
5521 		control->held_length = 0;
5522 		if (control->data) {
5523 			/* Hmm there is data here .. fix */
5524 			struct mbuf *m_tmp;
5525 			int cnt = 0;
5526 
5527 			m_tmp = control->data;
5528 			while (m_tmp) {
5529 				cnt += SCTP_BUF_LEN(m_tmp);
5530 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5531 					control->tail_mbuf = m_tmp;
5532 					control->end_added = 1;
5533 				}
5534 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5535 			}
5536 			control->length = cnt;
5537 		} else {
5538 			/* remove it */
5539 			TAILQ_REMOVE(&inp->read_queue, control, next);
5540 			/* Add back any hiddend data */
5541 			sctp_free_remote_addr(control->whoFrom);
5542 			sctp_free_a_readq(stcb, control);
5543 		}
5544 		if (hold_rlock) {
5545 			hold_rlock = 0;
5546 			SCTP_INP_READ_UNLOCK(inp);
5547 		}
5548 		goto restart;
5549 	}
5550 	if ((control->length == 0) &&
5551 	    (control->end_added == 1)) {
5552 		/*
5553 		 * Do we also need to check for (control->pdapi_aborted ==
5554 		 * 1)?
5555 		 */
5556 		if (hold_rlock == 0) {
5557 			hold_rlock = 1;
5558 			SCTP_INP_READ_LOCK(inp);
5559 		}
5560 		TAILQ_REMOVE(&inp->read_queue, control, next);
5561 		if (control->data) {
5562 #ifdef INVARIANTS
5563 			panic("control->data not null but control->length == 0");
5564 #else
5565 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5566 			sctp_m_freem(control->data);
5567 			control->data = NULL;
5568 #endif
5569 		}
5570 		if (control->aux_data) {
5571 			sctp_m_free(control->aux_data);
5572 			control->aux_data = NULL;
5573 		}
5574 #ifdef INVARIANTS
5575 		if (control->on_strm_q) {
5576 			panic("About to free ctl:%p so:%p and its in %d",
5577 			    control, so, control->on_strm_q);
5578 		}
5579 #endif
5580 		sctp_free_remote_addr(control->whoFrom);
5581 		sctp_free_a_readq(stcb, control);
5582 		if (hold_rlock) {
5583 			hold_rlock = 0;
5584 			SCTP_INP_READ_UNLOCK(inp);
5585 		}
5586 		goto restart;
5587 	}
5588 	if (control->length == 0) {
5589 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5590 		    (filling_sinfo)) {
5591 			/* find a more suitable one then this */
5592 			ctl = TAILQ_NEXT(control, next);
5593 			while (ctl) {
5594 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5595 				    (ctl->some_taken ||
5596 				    (ctl->spec_flags & M_NOTIFICATION) ||
5597 				    ((ctl->do_not_ref_stcb == 0) &&
5598 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5599 				    ) {
5600 					/*-
5601 					 * If we have a different TCB next, and there is data
5602 					 * present. If we have already taken some (pdapi), OR we can
5603 					 * ref the tcb and no delivery as started on this stream, we
5604 					 * take it. Note we allow a notification on a different
5605 					 * assoc to be delivered..
5606 					 */
5607 					control = ctl;
5608 					goto found_one;
5609 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5610 					    (ctl->length) &&
5611 					    ((ctl->some_taken) ||
5612 					    ((ctl->do_not_ref_stcb == 0) &&
5613 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5614 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5615 					/*-
5616 					 * If we have the same tcb, and there is data present, and we
5617 					 * have the strm interleave feature present. Then if we have
5618 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5619 					 * not started a delivery for this stream, we can take it.
5620 					 * Note we do NOT allow a notificaiton on the same assoc to
5621 					 * be delivered.
5622 					 */
5623 					control = ctl;
5624 					goto found_one;
5625 				}
5626 				ctl = TAILQ_NEXT(ctl, next);
5627 			}
5628 		}
5629 		/*
5630 		 * if we reach here, not suitable replacement is available
5631 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5632 		 * into the our held count, and its time to sleep again.
5633 		 */
5634 		held_length = so->so_rcv.sb_cc;
5635 		control->held_length = so->so_rcv.sb_cc;
5636 		goto restart;
5637 	}
5638 	/* Clear the held length since there is something to read */
5639 	control->held_length = 0;
5640 	if (hold_rlock) {
5641 		SCTP_INP_READ_UNLOCK(inp);
5642 		hold_rlock = 0;
5643 	}
5644 found_one:
5645 	/*
5646 	 * If we reach here, control has a some data for us to read off.
5647 	 * Note that stcb COULD be NULL.
5648 	 */
5649 	control->some_taken++;
5650 	if (hold_sblock) {
5651 		SOCKBUF_UNLOCK(&so->so_rcv);
5652 		hold_sblock = 0;
5653 	}
5654 	stcb = control->stcb;
5655 	if (stcb) {
5656 		if ((control->do_not_ref_stcb == 0) &&
5657 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5658 			if (freecnt_applied == 0)
5659 				stcb = NULL;
5660 		} else if (control->do_not_ref_stcb == 0) {
5661 			/* you can't free it on me please */
5662 			/*
5663 			 * The lock on the socket buffer protects us so the
5664 			 * free code will stop. But since we used the
5665 			 * socketbuf lock and the sender uses the tcb_lock
5666 			 * to increment, we need to use the atomic add to
5667 			 * the refcnt
5668 			 */
5669 			if (freecnt_applied) {
5670 #ifdef INVARIANTS
5671 				panic("refcnt already incremented");
5672 #else
5673 				SCTP_PRINTF("refcnt already incremented?\n");
5674 #endif
5675 			} else {
5676 				atomic_add_int(&stcb->asoc.refcnt, 1);
5677 				freecnt_applied = 1;
5678 			}
5679 			/*
5680 			 * Setup to remember how much we have not yet told
5681 			 * the peer our rwnd has opened up. Note we grab the
5682 			 * value from the tcb from last time. Note too that
5683 			 * sack sending clears this when a sack is sent,
5684 			 * which is fine. Once we hit the rwnd_req, we then
5685 			 * will go to the sctp_user_rcvd() that will not
5686 			 * lock until it KNOWs it MUST send a WUP-SACK.
5687 			 */
5688 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5689 			stcb->freed_by_sorcv_sincelast = 0;
5690 		}
5691 	}
5692 	if (stcb &&
5693 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5694 	    control->do_not_ref_stcb == 0) {
5695 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5696 	}
5697 	/* First lets get off the sinfo and sockaddr info */
5698 	if ((sinfo) && filling_sinfo) {
5699 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5700 		nxt = TAILQ_NEXT(control, next);
5701 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5702 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5703 			struct sctp_extrcvinfo *s_extra;
5704 
5705 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5706 			if ((nxt) &&
5707 			    (nxt->length)) {
5708 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5709 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5710 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5711 				}
5712 				if (nxt->spec_flags & M_NOTIFICATION) {
5713 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5714 				}
5715 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5716 				s_extra->serinfo_next_length = nxt->length;
5717 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5718 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5719 				if (nxt->tail_mbuf != NULL) {
5720 					if (nxt->end_added) {
5721 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5722 					}
5723 				}
5724 			} else {
5725 				/*
5726 				 * we explicitly 0 this, since the memcpy
5727 				 * got some other things beyond the older
5728 				 * sinfo_ that is on the control's structure
5729 				 * :-D
5730 				 */
5731 				nxt = NULL;
5732 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5733 				s_extra->serinfo_next_aid = 0;
5734 				s_extra->serinfo_next_length = 0;
5735 				s_extra->serinfo_next_ppid = 0;
5736 				s_extra->serinfo_next_stream = 0;
5737 			}
5738 		}
5739 		/*
5740 		 * update off the real current cum-ack, if we have an stcb.
5741 		 */
5742 		if ((control->do_not_ref_stcb == 0) && stcb)
5743 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5744 		/*
5745 		 * mask off the high bits, we keep the actual chunk bits in
5746 		 * there.
5747 		 */
5748 		sinfo->sinfo_flags &= 0x00ff;
5749 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5750 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5751 		}
5752 	}
5753 #ifdef SCTP_ASOCLOG_OF_TSNS
5754 	{
5755 		int index, newindex;
5756 		struct sctp_pcbtsn_rlog *entry;
5757 
5758 		do {
5759 			index = inp->readlog_index;
5760 			newindex = index + 1;
5761 			if (newindex >= SCTP_READ_LOG_SIZE) {
5762 				newindex = 0;
5763 			}
5764 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5765 		entry = &inp->readlog[index];
5766 		entry->vtag = control->sinfo_assoc_id;
5767 		entry->strm = control->sinfo_stream;
5768 		entry->seq = control->sinfo_ssn;
5769 		entry->sz = control->length;
5770 		entry->flgs = control->sinfo_flags;
5771 	}
5772 #endif
5773 	if ((fromlen > 0) && (from != NULL)) {
5774 		union sctp_sockstore store;
5775 		size_t len;
5776 
5777 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5778 #ifdef INET6
5779 		case AF_INET6:
5780 			len = sizeof(struct sockaddr_in6);
5781 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5782 			store.sin6.sin6_port = control->port_from;
5783 			break;
5784 #endif
5785 #ifdef INET
5786 		case AF_INET:
5787 #ifdef INET6
5788 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5789 				len = sizeof(struct sockaddr_in6);
5790 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5791 				    &store.sin6);
5792 				store.sin6.sin6_port = control->port_from;
5793 			} else {
5794 				len = sizeof(struct sockaddr_in);
5795 				store.sin = control->whoFrom->ro._l_addr.sin;
5796 				store.sin.sin_port = control->port_from;
5797 			}
5798 #else
5799 			len = sizeof(struct sockaddr_in);
5800 			store.sin = control->whoFrom->ro._l_addr.sin;
5801 			store.sin.sin_port = control->port_from;
5802 #endif
5803 			break;
5804 #endif
5805 		default:
5806 			len = 0;
5807 			break;
5808 		}
5809 		memcpy(from, &store, min((size_t)fromlen, len));
5810 #ifdef INET6
5811 		{
5812 			struct sockaddr_in6 lsa6, *from6;
5813 
5814 			from6 = (struct sockaddr_in6 *)from;
5815 			sctp_recover_scope_mac(from6, (&lsa6));
5816 		}
5817 #endif
5818 	}
5819 	/* now copy out what data we can */
5820 	if (mp == NULL) {
5821 		/* copy out each mbuf in the chain up to length */
5822 get_more_data:
5823 		m = control->data;
5824 		while (m) {
5825 			/* Move out all we can */
5826 			cp_len = (int)uio->uio_resid;
5827 			my_len = (int)SCTP_BUF_LEN(m);
5828 			if (cp_len > my_len) {
5829 				/* not enough in this buf */
5830 				cp_len = my_len;
5831 			}
5832 			if (hold_rlock) {
5833 				SCTP_INP_READ_UNLOCK(inp);
5834 				hold_rlock = 0;
5835 			}
5836 			if (cp_len > 0)
5837 				error = uiomove(mtod(m, char *), cp_len, uio);
5838 			/* re-read */
5839 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5840 				goto release;
5841 			}
5842 			if ((control->do_not_ref_stcb == 0) && stcb &&
5843 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5844 				no_rcv_needed = 1;
5845 			}
5846 			if (error) {
5847 				/* error we are out of here */
5848 				goto release;
5849 			}
5850 			SCTP_INP_READ_LOCK(inp);
5851 			hold_rlock = 1;
5852 			if (cp_len == SCTP_BUF_LEN(m)) {
5853 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5854 				    (control->end_added)) {
5855 					out_flags |= MSG_EOR;
5856 					if ((control->do_not_ref_stcb == 0) &&
5857 					    (control->stcb != NULL) &&
5858 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5859 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5860 				}
5861 				if (control->spec_flags & M_NOTIFICATION) {
5862 					out_flags |= MSG_NOTIFICATION;
5863 				}
5864 				/* we ate up the mbuf */
5865 				if (in_flags & MSG_PEEK) {
5866 					/* just looking */
5867 					m = SCTP_BUF_NEXT(m);
5868 					copied_so_far += cp_len;
5869 				} else {
5870 					/* dispose of the mbuf */
5871 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5872 						sctp_sblog(&so->so_rcv,
5873 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5874 					}
5875 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5876 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5877 						sctp_sblog(&so->so_rcv,
5878 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5879 					}
5880 					copied_so_far += cp_len;
5881 					freed_so_far += cp_len;
5882 					freed_so_far += MSIZE;
5883 					atomic_subtract_int(&control->length, cp_len);
5884 					control->data = sctp_m_free(m);
5885 					m = control->data;
5886 					/*
5887 					 * been through it all, must hold sb
5888 					 * lock ok to null tail
5889 					 */
5890 					if (control->data == NULL) {
5891 #ifdef INVARIANTS
5892 						if ((control->end_added == 0) ||
5893 						    (TAILQ_NEXT(control, next) == NULL)) {
5894 							/*
5895 							 * If the end is not
5896 							 * added, OR the
5897 							 * next is NOT null
5898 							 * we MUST have the
5899 							 * lock.
5900 							 */
5901 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5902 								panic("Hmm we don't own the lock?");
5903 							}
5904 						}
5905 #endif
5906 						control->tail_mbuf = NULL;
5907 #ifdef INVARIANTS
5908 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5909 							panic("end_added, nothing left and no MSG_EOR");
5910 						}
5911 #endif
5912 					}
5913 				}
5914 			} else {
5915 				/* Do we need to trim the mbuf? */
5916 				if (control->spec_flags & M_NOTIFICATION) {
5917 					out_flags |= MSG_NOTIFICATION;
5918 				}
5919 				if ((in_flags & MSG_PEEK) == 0) {
5920 					SCTP_BUF_RESV_UF(m, cp_len);
5921 					SCTP_BUF_LEN(m) -= cp_len;
5922 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5923 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5924 					}
5925 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5926 					if ((control->do_not_ref_stcb == 0) &&
5927 					    stcb) {
5928 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5929 					}
5930 					copied_so_far += cp_len;
5931 					freed_so_far += cp_len;
5932 					freed_so_far += MSIZE;
5933 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5934 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5935 						    SCTP_LOG_SBRESULT, 0);
5936 					}
5937 					atomic_subtract_int(&control->length, cp_len);
5938 				} else {
5939 					copied_so_far += cp_len;
5940 				}
5941 			}
5942 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5943 				break;
5944 			}
5945 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5946 			    (control->do_not_ref_stcb == 0) &&
5947 			    (freed_so_far >= rwnd_req)) {
5948 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5949 			}
5950 		}		/* end while(m) */
5951 		/*
5952 		 * At this point we have looked at it all and we either have
5953 		 * a MSG_EOR/or read all the user wants... <OR>
5954 		 * control->length == 0.
5955 		 */
5956 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5957 			/* we are done with this control */
5958 			if (control->length == 0) {
5959 				if (control->data) {
5960 #ifdef INVARIANTS
5961 					panic("control->data not null at read eor?");
5962 #else
5963 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5964 					sctp_m_freem(control->data);
5965 					control->data = NULL;
5966 #endif
5967 				}
5968 		done_with_control:
5969 				if (hold_rlock == 0) {
5970 					SCTP_INP_READ_LOCK(inp);
5971 					hold_rlock = 1;
5972 				}
5973 				TAILQ_REMOVE(&inp->read_queue, control, next);
5974 				/* Add back any hiddend data */
5975 				if (control->held_length) {
5976 					held_length = 0;
5977 					control->held_length = 0;
5978 					wakeup_read_socket = 1;
5979 				}
5980 				if (control->aux_data) {
5981 					sctp_m_free(control->aux_data);
5982 					control->aux_data = NULL;
5983 				}
5984 				no_rcv_needed = control->do_not_ref_stcb;
5985 				sctp_free_remote_addr(control->whoFrom);
5986 				control->data = NULL;
5987 #ifdef INVARIANTS
5988 				if (control->on_strm_q) {
5989 					panic("About to free ctl:%p so:%p and its in %d",
5990 					    control, so, control->on_strm_q);
5991 				}
5992 #endif
5993 				sctp_free_a_readq(stcb, control);
5994 				control = NULL;
5995 				if ((freed_so_far >= rwnd_req) &&
5996 				    (no_rcv_needed == 0))
5997 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5998 
5999 			} else {
6000 				/*
6001 				 * The user did not read all of this
6002 				 * message, turn off the returned MSG_EOR
6003 				 * since we are leaving more behind on the
6004 				 * control to read.
6005 				 */
6006 #ifdef INVARIANTS
6007 				if (control->end_added &&
6008 				    (control->data == NULL) &&
6009 				    (control->tail_mbuf == NULL)) {
6010 					panic("Gak, control->length is corrupt?");
6011 				}
6012 #endif
6013 				no_rcv_needed = control->do_not_ref_stcb;
6014 				out_flags &= ~MSG_EOR;
6015 			}
6016 		}
6017 		if (out_flags & MSG_EOR) {
6018 			goto release;
6019 		}
6020 		if ((uio->uio_resid == 0) ||
6021 		    ((in_eeor_mode) &&
6022 		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
6023 			goto release;
6024 		}
6025 		/*
6026 		 * If I hit here the receiver wants more and this message is
6027 		 * NOT done (pd-api). So two questions. Can we block? if not
6028 		 * we are done. Did the user NOT set MSG_WAITALL?
6029 		 */
6030 		if (block_allowed == 0) {
6031 			goto release;
6032 		}
6033 		/*
6034 		 * We need to wait for more data a few things: - We don't
6035 		 * sbunlock() so we don't get someone else reading. - We
6036 		 * must be sure to account for the case where what is added
6037 		 * is NOT to our control when we wakeup.
6038 		 */
6039 
6040 		/*
6041 		 * Do we need to tell the transport a rwnd update might be
6042 		 * needed before we go to sleep?
6043 		 */
6044 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6045 		    ((freed_so_far >= rwnd_req) &&
6046 		    (control->do_not_ref_stcb == 0) &&
6047 		    (no_rcv_needed == 0))) {
6048 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6049 		}
6050 wait_some_more:
6051 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6052 			goto release;
6053 		}
6054 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6055 			goto release;
6056 
6057 		if (hold_rlock == 1) {
6058 			SCTP_INP_READ_UNLOCK(inp);
6059 			hold_rlock = 0;
6060 		}
6061 		if (hold_sblock == 0) {
6062 			SOCKBUF_LOCK(&so->so_rcv);
6063 			hold_sblock = 1;
6064 		}
6065 		if ((copied_so_far) && (control->length == 0) &&
6066 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6067 			goto release;
6068 		}
6069 		if (so->so_rcv.sb_cc <= control->held_length) {
6070 			error = sbwait(&so->so_rcv);
6071 			if (error) {
6072 				goto release;
6073 			}
6074 			control->held_length = 0;
6075 		}
6076 		if (hold_sblock) {
6077 			SOCKBUF_UNLOCK(&so->so_rcv);
6078 			hold_sblock = 0;
6079 		}
6080 		if (control->length == 0) {
6081 			/* still nothing here */
6082 			if (control->end_added == 1) {
6083 				/* he aborted, or is done i.e.did a shutdown */
6084 				out_flags |= MSG_EOR;
6085 				if (control->pdapi_aborted) {
6086 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6087 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6088 
6089 					out_flags |= MSG_TRUNC;
6090 				} else {
6091 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6092 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6093 				}
6094 				goto done_with_control;
6095 			}
6096 			if (so->so_rcv.sb_cc > held_length) {
6097 				control->held_length = so->so_rcv.sb_cc;
6098 				held_length = 0;
6099 			}
6100 			goto wait_some_more;
6101 		} else if (control->data == NULL) {
6102 			/*
6103 			 * we must re-sync since data is probably being
6104 			 * added
6105 			 */
6106 			SCTP_INP_READ_LOCK(inp);
6107 			if ((control->length > 0) && (control->data == NULL)) {
6108 				/*
6109 				 * big trouble.. we have the lock and its
6110 				 * corrupt?
6111 				 */
6112 #ifdef INVARIANTS
6113 				panic("Impossible data==NULL length !=0");
6114 #endif
6115 				out_flags |= MSG_EOR;
6116 				out_flags |= MSG_TRUNC;
6117 				control->length = 0;
6118 				SCTP_INP_READ_UNLOCK(inp);
6119 				goto done_with_control;
6120 			}
6121 			SCTP_INP_READ_UNLOCK(inp);
6122 			/* We will fall around to get more data */
6123 		}
6124 		goto get_more_data;
6125 	} else {
6126 		/*-
6127 		 * Give caller back the mbuf chain,
6128 		 * store in uio_resid the length
6129 		 */
6130 		wakeup_read_socket = 0;
6131 		if ((control->end_added == 0) ||
6132 		    (TAILQ_NEXT(control, next) == NULL)) {
6133 			/* Need to get rlock */
6134 			if (hold_rlock == 0) {
6135 				SCTP_INP_READ_LOCK(inp);
6136 				hold_rlock = 1;
6137 			}
6138 		}
6139 		if (control->end_added) {
6140 			out_flags |= MSG_EOR;
6141 			if ((control->do_not_ref_stcb == 0) &&
6142 			    (control->stcb != NULL) &&
6143 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6144 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6145 		}
6146 		if (control->spec_flags & M_NOTIFICATION) {
6147 			out_flags |= MSG_NOTIFICATION;
6148 		}
6149 		uio->uio_resid = control->length;
6150 		*mp = control->data;
6151 		m = control->data;
6152 		while (m) {
6153 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6154 				sctp_sblog(&so->so_rcv,
6155 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6156 			}
6157 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6158 			freed_so_far += SCTP_BUF_LEN(m);
6159 			freed_so_far += MSIZE;
6160 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6161 				sctp_sblog(&so->so_rcv,
6162 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6163 			}
6164 			m = SCTP_BUF_NEXT(m);
6165 		}
6166 		control->data = control->tail_mbuf = NULL;
6167 		control->length = 0;
6168 		if (out_flags & MSG_EOR) {
6169 			/* Done with this control */
6170 			goto done_with_control;
6171 		}
6172 	}
6173 release:
6174 	if (hold_rlock == 1) {
6175 		SCTP_INP_READ_UNLOCK(inp);
6176 		hold_rlock = 0;
6177 	}
6178 	if (hold_sblock == 1) {
6179 		SOCKBUF_UNLOCK(&so->so_rcv);
6180 		hold_sblock = 0;
6181 	}
6182 	sbunlock(&so->so_rcv);
6183 	sockbuf_lock = 0;
6184 
6185 release_unlocked:
6186 	if (hold_sblock) {
6187 		SOCKBUF_UNLOCK(&so->so_rcv);
6188 		hold_sblock = 0;
6189 	}
6190 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6191 		if ((freed_so_far >= rwnd_req) &&
6192 		    (control && (control->do_not_ref_stcb == 0)) &&
6193 		    (no_rcv_needed == 0))
6194 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6195 	}
6196 out:
6197 	if (msg_flags) {
6198 		*msg_flags = out_flags;
6199 	}
6200 	if (((out_flags & MSG_EOR) == 0) &&
6201 	    ((in_flags & MSG_PEEK) == 0) &&
6202 	    (sinfo) &&
6203 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6204 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6205 		struct sctp_extrcvinfo *s_extra;
6206 
6207 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6208 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6209 	}
6210 	if (hold_rlock == 1) {
6211 		SCTP_INP_READ_UNLOCK(inp);
6212 	}
6213 	if (hold_sblock) {
6214 		SOCKBUF_UNLOCK(&so->so_rcv);
6215 	}
6216 	if (sockbuf_lock) {
6217 		sbunlock(&so->so_rcv);
6218 	}
6219 	if (freecnt_applied) {
6220 		/*
6221 		 * The lock on the socket buffer protects us so the free
6222 		 * code will stop. But since we used the socketbuf lock and
6223 		 * the sender uses the tcb_lock to increment, we need to use
6224 		 * the atomic add to the refcnt.
6225 		 */
6226 		if (stcb == NULL) {
6227 #ifdef INVARIANTS
6228 			panic("stcb for refcnt has gone NULL?");
6229 			goto stage_left;
6230 #else
6231 			goto stage_left;
6232 #endif
6233 		}
6234 		atomic_add_int(&stcb->asoc.refcnt, -1);
6235 		/* Save the value back for next time */
6236 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6237 	}
6238 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6239 		if (stcb) {
6240 			sctp_misc_ints(SCTP_SORECV_DONE,
6241 			    freed_so_far,
6242 			    (uint32_t) ((uio) ? (slen - uio->uio_resid) : slen),
6243 			    stcb->asoc.my_rwnd,
6244 			    so->so_rcv.sb_cc);
6245 		} else {
6246 			sctp_misc_ints(SCTP_SORECV_DONE,
6247 			    freed_so_far,
6248 			    (uint32_t) ((uio) ? (slen - uio->uio_resid) : slen),
6249 			    0,
6250 			    so->so_rcv.sb_cc);
6251 		}
6252 	}
6253 stage_left:
6254 	if (wakeup_read_socket) {
6255 		sctp_sorwakeup(inp, so);
6256 	}
6257 	return (error);
6258 }
6259 
6260 
6261 #ifdef SCTP_MBUF_LOGGING
6262 struct mbuf *
6263 sctp_m_free(struct mbuf *m)
6264 {
6265 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6266 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6267 	}
6268 	return (m_free(m));
6269 }
6270 
6271 void
6272 sctp_m_freem(struct mbuf *mb)
6273 {
6274 	while (mb != NULL)
6275 		mb = sctp_m_free(mb);
6276 }
6277 
6278 #endif
6279 
6280 int
6281 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6282 {
6283 	/*
6284 	 * Given a local address. For all associations that holds the
6285 	 * address, request a peer-set-primary.
6286 	 */
6287 	struct sctp_ifa *ifa;
6288 	struct sctp_laddr *wi;
6289 
6290 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6291 	if (ifa == NULL) {
6292 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6293 		return (EADDRNOTAVAIL);
6294 	}
6295 	/*
6296 	 * Now that we have the ifa we must awaken the iterator with this
6297 	 * message.
6298 	 */
6299 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6300 	if (wi == NULL) {
6301 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6302 		return (ENOMEM);
6303 	}
6304 	/* Now incr the count and int wi structure */
6305 	SCTP_INCR_LADDR_COUNT();
6306 	bzero(wi, sizeof(*wi));
6307 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6308 	wi->ifa = ifa;
6309 	wi->action = SCTP_SET_PRIM_ADDR;
6310 	atomic_add_int(&ifa->refcount, 1);
6311 
6312 	/* Now add it to the work queue */
6313 	SCTP_WQ_ADDR_LOCK();
6314 	/*
6315 	 * Should this really be a tailq? As it is we will process the
6316 	 * newest first :-0
6317 	 */
6318 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6319 	SCTP_WQ_ADDR_UNLOCK();
6320 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6321 	    (struct sctp_inpcb *)NULL,
6322 	    (struct sctp_tcb *)NULL,
6323 	    (struct sctp_nets *)NULL);
6324 	return (0);
6325 }
6326 
6327 
6328 int
6329 sctp_soreceive(struct socket *so,
6330     struct sockaddr **psa,
6331     struct uio *uio,
6332     struct mbuf **mp0,
6333     struct mbuf **controlp,
6334     int *flagsp)
6335 {
6336 	int error, fromlen;
6337 	uint8_t sockbuf[256];
6338 	struct sockaddr *from;
6339 	struct sctp_extrcvinfo sinfo;
6340 	int filling_sinfo = 1;
6341 	struct sctp_inpcb *inp;
6342 
6343 	inp = (struct sctp_inpcb *)so->so_pcb;
6344 	/* pickup the assoc we are reading from */
6345 	if (inp == NULL) {
6346 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6347 		return (EINVAL);
6348 	}
6349 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6350 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6351 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6352 	    (controlp == NULL)) {
6353 		/* user does not want the sndrcv ctl */
6354 		filling_sinfo = 0;
6355 	}
6356 	if (psa) {
6357 		from = (struct sockaddr *)sockbuf;
6358 		fromlen = sizeof(sockbuf);
6359 		from->sa_len = 0;
6360 	} else {
6361 		from = NULL;
6362 		fromlen = 0;
6363 	}
6364 
6365 	if (filling_sinfo) {
6366 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6367 	}
6368 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6369 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6370 	if (controlp != NULL) {
6371 		/* copy back the sinfo in a CMSG format */
6372 		if (filling_sinfo)
6373 			*controlp = sctp_build_ctl_nchunk(inp,
6374 			    (struct sctp_sndrcvinfo *)&sinfo);
6375 		else
6376 			*controlp = NULL;
6377 	}
6378 	if (psa) {
6379 		/* copy back the address info */
6380 		if (from && from->sa_len) {
6381 			*psa = sodupsockaddr(from, M_NOWAIT);
6382 		} else {
6383 			*psa = NULL;
6384 		}
6385 	}
6386 	return (error);
6387 }
6388 
6389 
6390 
6391 
6392 
6393 int
6394 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6395     int totaddr, int *error)
6396 {
6397 	int added = 0;
6398 	int i;
6399 	struct sctp_inpcb *inp;
6400 	struct sockaddr *sa;
6401 	size_t incr = 0;
6402 
6403 #ifdef INET
6404 	struct sockaddr_in *sin;
6405 
6406 #endif
6407 #ifdef INET6
6408 	struct sockaddr_in6 *sin6;
6409 
6410 #endif
6411 
6412 	sa = addr;
6413 	inp = stcb->sctp_ep;
6414 	*error = 0;
6415 	for (i = 0; i < totaddr; i++) {
6416 		switch (sa->sa_family) {
6417 #ifdef INET
6418 		case AF_INET:
6419 			incr = sizeof(struct sockaddr_in);
6420 			sin = (struct sockaddr_in *)sa;
6421 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6422 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6423 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6424 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6425 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6426 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6427 				*error = EINVAL;
6428 				goto out_now;
6429 			}
6430 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6431 				/* assoc gone no un-lock */
6432 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6433 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6434 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6435 				*error = ENOBUFS;
6436 				goto out_now;
6437 			}
6438 			added++;
6439 			break;
6440 #endif
6441 #ifdef INET6
6442 		case AF_INET6:
6443 			incr = sizeof(struct sockaddr_in6);
6444 			sin6 = (struct sockaddr_in6 *)sa;
6445 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6446 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6447 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6448 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6449 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6450 				*error = EINVAL;
6451 				goto out_now;
6452 			}
6453 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6454 				/* assoc gone no un-lock */
6455 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6456 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6457 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6458 				*error = ENOBUFS;
6459 				goto out_now;
6460 			}
6461 			added++;
6462 			break;
6463 #endif
6464 		default:
6465 			break;
6466 		}
6467 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6468 	}
6469 out_now:
6470 	return (added);
6471 }
6472 
6473 struct sctp_tcb *
6474 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6475     unsigned int *totaddr,
6476     unsigned int *num_v4, unsigned int *num_v6, int *error,
6477     unsigned int limit, int *bad_addr)
6478 {
6479 	struct sockaddr *sa;
6480 	struct sctp_tcb *stcb = NULL;
6481 	unsigned int incr, at, i;
6482 
6483 	at = incr = 0;
6484 	sa = addr;
6485 	*error = *num_v6 = *num_v4 = 0;
6486 	/* account and validate addresses */
6487 	for (i = 0; i < *totaddr; i++) {
6488 		switch (sa->sa_family) {
6489 #ifdef INET
6490 		case AF_INET:
6491 			if (sa->sa_len != incr) {
6492 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6493 				*error = EINVAL;
6494 				*bad_addr = 1;
6495 				return (NULL);
6496 			}
6497 			(*num_v4) += 1;
6498 			incr = (unsigned int)sizeof(struct sockaddr_in);
6499 			break;
6500 #endif
6501 #ifdef INET6
6502 		case AF_INET6:
6503 			{
6504 				struct sockaddr_in6 *sin6;
6505 
6506 				sin6 = (struct sockaddr_in6 *)sa;
6507 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6508 					/* Must be non-mapped for connectx */
6509 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6510 					*error = EINVAL;
6511 					*bad_addr = 1;
6512 					return (NULL);
6513 				}
6514 				if (sa->sa_len != incr) {
6515 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6516 					*error = EINVAL;
6517 					*bad_addr = 1;
6518 					return (NULL);
6519 				}
6520 				(*num_v6) += 1;
6521 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6522 				break;
6523 			}
6524 #endif
6525 		default:
6526 			*totaddr = i;
6527 			/* we are done */
6528 			break;
6529 		}
6530 		if (i == *totaddr) {
6531 			break;
6532 		}
6533 		SCTP_INP_INCR_REF(inp);
6534 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6535 		if (stcb != NULL) {
6536 			/* Already have or am bring up an association */
6537 			return (stcb);
6538 		} else {
6539 			SCTP_INP_DECR_REF(inp);
6540 		}
6541 		if ((at + incr) > limit) {
6542 			*totaddr = i;
6543 			break;
6544 		}
6545 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6546 	}
6547 	return ((struct sctp_tcb *)NULL);
6548 }
6549 
6550 /*
6551  * sctp_bindx(ADD) for one address.
6552  * assumes all arguments are valid/checked by caller.
6553  */
6554 void
6555 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6556     struct sockaddr *sa, sctp_assoc_t assoc_id,
6557     uint32_t vrf_id, int *error, void *p)
6558 {
6559 	struct sockaddr *addr_touse;
6560 
6561 #if defined(INET) && defined(INET6)
6562 	struct sockaddr_in sin;
6563 
6564 #endif
6565 
6566 	/* see if we're bound all already! */
6567 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6568 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6569 		*error = EINVAL;
6570 		return;
6571 	}
6572 	addr_touse = sa;
6573 #ifdef INET6
6574 	if (sa->sa_family == AF_INET6) {
6575 #ifdef INET
6576 		struct sockaddr_in6 *sin6;
6577 
6578 #endif
6579 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6580 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6581 			*error = EINVAL;
6582 			return;
6583 		}
6584 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6585 			/* can only bind v6 on PF_INET6 sockets */
6586 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6587 			*error = EINVAL;
6588 			return;
6589 		}
6590 #ifdef INET
6591 		sin6 = (struct sockaddr_in6 *)addr_touse;
6592 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6593 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6594 			    SCTP_IPV6_V6ONLY(inp)) {
6595 				/* can't bind v4-mapped on PF_INET sockets */
6596 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6597 				*error = EINVAL;
6598 				return;
6599 			}
6600 			in6_sin6_2_sin(&sin, sin6);
6601 			addr_touse = (struct sockaddr *)&sin;
6602 		}
6603 #endif
6604 	}
6605 #endif
6606 #ifdef INET
6607 	if (sa->sa_family == AF_INET) {
6608 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6609 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6610 			*error = EINVAL;
6611 			return;
6612 		}
6613 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6614 		    SCTP_IPV6_V6ONLY(inp)) {
6615 			/* can't bind v4 on PF_INET sockets */
6616 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6617 			*error = EINVAL;
6618 			return;
6619 		}
6620 	}
6621 #endif
6622 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6623 		if (p == NULL) {
6624 			/* Can't get proc for Net/Open BSD */
6625 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6626 			*error = EINVAL;
6627 			return;
6628 		}
6629 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6630 		return;
6631 	}
6632 	/*
6633 	 * No locks required here since bind and mgmt_ep_sa all do their own
6634 	 * locking. If we do something for the FIX: below we may need to
6635 	 * lock in that case.
6636 	 */
6637 	if (assoc_id == 0) {
6638 		/* add the address */
6639 		struct sctp_inpcb *lep;
6640 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6641 
6642 		/* validate the incoming port */
6643 		if ((lsin->sin_port != 0) &&
6644 		    (lsin->sin_port != inp->sctp_lport)) {
6645 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6646 			*error = EINVAL;
6647 			return;
6648 		} else {
6649 			/* user specified 0 port, set it to existing port */
6650 			lsin->sin_port = inp->sctp_lport;
6651 		}
6652 
6653 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6654 		if (lep != NULL) {
6655 			/*
6656 			 * We must decrement the refcount since we have the
6657 			 * ep already and are binding. No remove going on
6658 			 * here.
6659 			 */
6660 			SCTP_INP_DECR_REF(lep);
6661 		}
6662 		if (lep == inp) {
6663 			/* already bound to it.. ok */
6664 			return;
6665 		} else if (lep == NULL) {
6666 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6667 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6668 			    SCTP_ADD_IP_ADDRESS,
6669 			    vrf_id, NULL);
6670 		} else {
6671 			*error = EADDRINUSE;
6672 		}
6673 		if (*error)
6674 			return;
6675 	} else {
6676 		/*
6677 		 * FIX: decide whether we allow assoc based bindx
6678 		 */
6679 	}
6680 }
6681 
6682 /*
6683  * sctp_bindx(DELETE) for one address.
6684  * assumes all arguments are valid/checked by caller.
6685  */
6686 void
6687 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6688     struct sockaddr *sa, sctp_assoc_t assoc_id,
6689     uint32_t vrf_id, int *error)
6690 {
6691 	struct sockaddr *addr_touse;
6692 
6693 #if defined(INET) && defined(INET6)
6694 	struct sockaddr_in sin;
6695 
6696 #endif
6697 
6698 	/* see if we're bound all already! */
6699 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6700 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6701 		*error = EINVAL;
6702 		return;
6703 	}
6704 	addr_touse = sa;
6705 #ifdef INET6
6706 	if (sa->sa_family == AF_INET6) {
6707 #ifdef INET
6708 		struct sockaddr_in6 *sin6;
6709 
6710 #endif
6711 
6712 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6713 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6714 			*error = EINVAL;
6715 			return;
6716 		}
6717 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6718 			/* can only bind v6 on PF_INET6 sockets */
6719 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6720 			*error = EINVAL;
6721 			return;
6722 		}
6723 #ifdef INET
6724 		sin6 = (struct sockaddr_in6 *)addr_touse;
6725 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6726 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6727 			    SCTP_IPV6_V6ONLY(inp)) {
6728 				/* can't bind mapped-v4 on PF_INET sockets */
6729 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6730 				*error = EINVAL;
6731 				return;
6732 			}
6733 			in6_sin6_2_sin(&sin, sin6);
6734 			addr_touse = (struct sockaddr *)&sin;
6735 		}
6736 #endif
6737 	}
6738 #endif
6739 #ifdef INET
6740 	if (sa->sa_family == AF_INET) {
6741 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6742 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6743 			*error = EINVAL;
6744 			return;
6745 		}
6746 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6747 		    SCTP_IPV6_V6ONLY(inp)) {
6748 			/* can't bind v4 on PF_INET sockets */
6749 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6750 			*error = EINVAL;
6751 			return;
6752 		}
6753 	}
6754 #endif
6755 	/*
6756 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6757 	 * below is ever changed we may need to lock before calling
6758 	 * association level binding.
6759 	 */
6760 	if (assoc_id == 0) {
6761 		/* delete the address */
6762 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6763 		    SCTP_DEL_IP_ADDRESS,
6764 		    vrf_id, NULL);
6765 	} else {
6766 		/*
6767 		 * FIX: decide whether we allow assoc based bindx
6768 		 */
6769 	}
6770 }
6771 
6772 /*
6773  * returns the valid local address count for an assoc, taking into account
6774  * all scoping rules
6775  */
6776 int
6777 sctp_local_addr_count(struct sctp_tcb *stcb)
6778 {
6779 	int loopback_scope;
6780 
6781 #if defined(INET)
6782 	int ipv4_local_scope, ipv4_addr_legal;
6783 
6784 #endif
6785 #if defined (INET6)
6786 	int local_scope, site_scope, ipv6_addr_legal;
6787 
6788 #endif
6789 	struct sctp_vrf *vrf;
6790 	struct sctp_ifn *sctp_ifn;
6791 	struct sctp_ifa *sctp_ifa;
6792 	int count = 0;
6793 
6794 	/* Turn on all the appropriate scopes */
6795 	loopback_scope = stcb->asoc.scope.loopback_scope;
6796 #if defined(INET)
6797 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6798 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6799 #endif
6800 #if defined(INET6)
6801 	local_scope = stcb->asoc.scope.local_scope;
6802 	site_scope = stcb->asoc.scope.site_scope;
6803 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6804 #endif
6805 	SCTP_IPI_ADDR_RLOCK();
6806 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6807 	if (vrf == NULL) {
6808 		/* no vrf, no addresses */
6809 		SCTP_IPI_ADDR_RUNLOCK();
6810 		return (0);
6811 	}
6812 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6813 		/*
6814 		 * bound all case: go through all ifns on the vrf
6815 		 */
6816 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6817 			if ((loopback_scope == 0) &&
6818 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6819 				continue;
6820 			}
6821 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6822 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6823 					continue;
6824 				switch (sctp_ifa->address.sa.sa_family) {
6825 #ifdef INET
6826 				case AF_INET:
6827 					if (ipv4_addr_legal) {
6828 						struct sockaddr_in *sin;
6829 
6830 						sin = &sctp_ifa->address.sin;
6831 						if (sin->sin_addr.s_addr == 0) {
6832 							/*
6833 							 * skip unspecified
6834 							 * addrs
6835 							 */
6836 							continue;
6837 						}
6838 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6839 						    &sin->sin_addr) != 0) {
6840 							continue;
6841 						}
6842 						if ((ipv4_local_scope == 0) &&
6843 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6844 							continue;
6845 						}
6846 						/* count this one */
6847 						count++;
6848 					} else {
6849 						continue;
6850 					}
6851 					break;
6852 #endif
6853 #ifdef INET6
6854 				case AF_INET6:
6855 					if (ipv6_addr_legal) {
6856 						struct sockaddr_in6 *sin6;
6857 
6858 						sin6 = &sctp_ifa->address.sin6;
6859 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6860 							continue;
6861 						}
6862 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6863 						    &sin6->sin6_addr) != 0) {
6864 							continue;
6865 						}
6866 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6867 							if (local_scope == 0)
6868 								continue;
6869 							if (sin6->sin6_scope_id == 0) {
6870 								if (sa6_recoverscope(sin6) != 0)
6871 									/*
6872 									 *
6873 									 * bad
6874 									 *
6875 									 * li
6876 									 * nk
6877 									 *
6878 									 * loc
6879 									 * al
6880 									 *
6881 									 * add
6882 									 * re
6883 									 * ss
6884 									 * */
6885 									continue;
6886 							}
6887 						}
6888 						if ((site_scope == 0) &&
6889 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6890 							continue;
6891 						}
6892 						/* count this one */
6893 						count++;
6894 					}
6895 					break;
6896 #endif
6897 				default:
6898 					/* TSNH */
6899 					break;
6900 				}
6901 			}
6902 		}
6903 	} else {
6904 		/*
6905 		 * subset bound case
6906 		 */
6907 		struct sctp_laddr *laddr;
6908 
6909 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6910 		    sctp_nxt_addr) {
6911 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6912 				continue;
6913 			}
6914 			/* count this one */
6915 			count++;
6916 		}
6917 	}
6918 	SCTP_IPI_ADDR_RUNLOCK();
6919 	return (count);
6920 }
6921 
6922 #if defined(SCTP_LOCAL_TRACE_BUF)
6923 
6924 void
6925 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6926 {
6927 	uint32_t saveindex, newindex;
6928 
6929 	do {
6930 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6931 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6932 			newindex = 1;
6933 		} else {
6934 			newindex = saveindex + 1;
6935 		}
6936 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6937 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6938 		saveindex = 0;
6939 	}
6940 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6941 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6942 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6943 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6944 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6945 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6946 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6947 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6948 }
6949 
6950 #endif
6951 static void
6952 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6953     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6954 {
6955 	struct ip *iph;
6956 
6957 #ifdef INET6
6958 	struct ip6_hdr *ip6;
6959 
6960 #endif
6961 	struct mbuf *sp, *last;
6962 	struct udphdr *uhdr;
6963 	uint16_t port;
6964 
6965 	if ((m->m_flags & M_PKTHDR) == 0) {
6966 		/* Can't handle one that is not a pkt hdr */
6967 		goto out;
6968 	}
6969 	/* Pull the src port */
6970 	iph = mtod(m, struct ip *);
6971 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6972 	port = uhdr->uh_sport;
6973 	/*
6974 	 * Split out the mbuf chain. Leave the IP header in m, place the
6975 	 * rest in the sp.
6976 	 */
6977 	sp = m_split(m, off, M_NOWAIT);
6978 	if (sp == NULL) {
6979 		/* Gak, drop packet, we can't do a split */
6980 		goto out;
6981 	}
6982 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6983 		/* Gak, packet can't have an SCTP header in it - too small */
6984 		m_freem(sp);
6985 		goto out;
6986 	}
6987 	/* Now pull up the UDP header and SCTP header together */
6988 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6989 	if (sp == NULL) {
6990 		/* Gak pullup failed */
6991 		goto out;
6992 	}
6993 	/* Trim out the UDP header */
6994 	m_adj(sp, sizeof(struct udphdr));
6995 
6996 	/* Now reconstruct the mbuf chain */
6997 	for (last = m; last->m_next; last = last->m_next);
6998 	last->m_next = sp;
6999 	m->m_pkthdr.len += sp->m_pkthdr.len;
7000 	/*
7001 	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
7002 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
7003 	 * CSUM_SCTP_VALID this would imply that the HW also verified the
7004 	 * SCTP checksum. Therefore, clear the bit.
7005 	 */
7006 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
7007 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
7008 	    m->m_pkthdr.len,
7009 	    if_name(m->m_pkthdr.rcvif),
7010 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
7011 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
7012 	iph = mtod(m, struct ip *);
7013 	switch (iph->ip_v) {
7014 #ifdef INET
7015 	case IPVERSION:
7016 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
7017 		sctp_input_with_port(m, off, port);
7018 		break;
7019 #endif
7020 #ifdef INET6
7021 	case IPV6_VERSION >> 4:
7022 		ip6 = mtod(m, struct ip6_hdr *);
7023 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
7024 		sctp6_input_with_port(&m, &off, port);
7025 		break;
7026 #endif
7027 	default:
7028 		goto out;
7029 		break;
7030 	}
7031 	return;
7032 out:
7033 	m_freem(m);
7034 }
7035 
7036 void
7037 sctp_over_udp_stop(void)
7038 {
7039 	/*
7040 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7041 	 * for writting!
7042 	 */
7043 #ifdef INET
7044 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7045 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7046 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7047 	}
7048 #endif
7049 #ifdef INET6
7050 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7051 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7052 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7053 	}
7054 #endif
7055 }
7056 
7057 int
7058 sctp_over_udp_start(void)
7059 {
7060 	uint16_t port;
7061 	int ret;
7062 
7063 #ifdef INET
7064 	struct sockaddr_in sin;
7065 
7066 #endif
7067 #ifdef INET6
7068 	struct sockaddr_in6 sin6;
7069 
7070 #endif
7071 	/*
7072 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7073 	 * for writting!
7074 	 */
7075 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7076 	if (ntohs(port) == 0) {
7077 		/* Must have a port set */
7078 		return (EINVAL);
7079 	}
7080 #ifdef INET
7081 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7082 		/* Already running -- must stop first */
7083 		return (EALREADY);
7084 	}
7085 #endif
7086 #ifdef INET6
7087 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7088 		/* Already running -- must stop first */
7089 		return (EALREADY);
7090 	}
7091 #endif
7092 #ifdef INET
7093 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7094 	    SOCK_DGRAM, IPPROTO_UDP,
7095 	    curthread->td_ucred, curthread))) {
7096 		sctp_over_udp_stop();
7097 		return (ret);
7098 	}
7099 	/* Call the special UDP hook. */
7100 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7101 	    sctp_recv_udp_tunneled_packet, NULL))) {
7102 		sctp_over_udp_stop();
7103 		return (ret);
7104 	}
7105 	/* Ok, we have a socket, bind it to the port. */
7106 	memset(&sin, 0, sizeof(struct sockaddr_in));
7107 	sin.sin_len = sizeof(struct sockaddr_in);
7108 	sin.sin_family = AF_INET;
7109 	sin.sin_port = htons(port);
7110 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7111 	    (struct sockaddr *)&sin, curthread))) {
7112 		sctp_over_udp_stop();
7113 		return (ret);
7114 	}
7115 #endif
7116 #ifdef INET6
7117 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7118 	    SOCK_DGRAM, IPPROTO_UDP,
7119 	    curthread->td_ucred, curthread))) {
7120 		sctp_over_udp_stop();
7121 		return (ret);
7122 	}
7123 	/* Call the special UDP hook. */
7124 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7125 	    sctp_recv_udp_tunneled_packet, NULL))) {
7126 		sctp_over_udp_stop();
7127 		return (ret);
7128 	}
7129 	/* Ok, we have a socket, bind it to the port. */
7130 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7131 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7132 	sin6.sin6_family = AF_INET6;
7133 	sin6.sin6_port = htons(port);
7134 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7135 	    (struct sockaddr *)&sin6, curthread))) {
7136 		sctp_over_udp_stop();
7137 		return (ret);
7138 	}
7139 #endif
7140 	return (0);
7141 }
7142