xref: /freebsd/sys/netinet/sctputil.c (revision d9f0ce31900a48d1a2bfc1c8c86f79d1e831451a)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/udp.h>
53 #include <netinet/udp_var.h>
54 #include <sys/proc.h>
55 
56 
57 #ifndef KTR_SCTP
58 #define KTR_SCTP KTR_SUBSYS
59 #endif
60 
61 extern const struct sctp_cc_functions sctp_cc_functions[];
62 extern const struct sctp_ss_functions sctp_ss_functions[];
63 
64 void
65 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
66 {
67 	struct sctp_cwnd_log sctp_clog;
68 
69 	sctp_clog.x.sb.stcb = stcb;
70 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
71 	if (stcb)
72 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
73 	else
74 		sctp_clog.x.sb.stcb_sbcc = 0;
75 	sctp_clog.x.sb.incr = incr;
76 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
77 	    SCTP_LOG_EVENT_SB,
78 	    from,
79 	    sctp_clog.x.misc.log1,
80 	    sctp_clog.x.misc.log2,
81 	    sctp_clog.x.misc.log3,
82 	    sctp_clog.x.misc.log4);
83 }
84 
85 void
86 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
87 {
88 	struct sctp_cwnd_log sctp_clog;
89 
90 	sctp_clog.x.close.inp = (void *)inp;
91 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
92 	if (stcb) {
93 		sctp_clog.x.close.stcb = (void *)stcb;
94 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
95 	} else {
96 		sctp_clog.x.close.stcb = 0;
97 		sctp_clog.x.close.state = 0;
98 	}
99 	sctp_clog.x.close.loc = loc;
100 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
101 	    SCTP_LOG_EVENT_CLOSE,
102 	    0,
103 	    sctp_clog.x.misc.log1,
104 	    sctp_clog.x.misc.log2,
105 	    sctp_clog.x.misc.log3,
106 	    sctp_clog.x.misc.log4);
107 }
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 }
125 
126 void
127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128 {
129 	struct sctp_cwnd_log sctp_clog;
130 
131 	sctp_clog.x.strlog.stcb = stcb;
132 	sctp_clog.x.strlog.n_tsn = tsn;
133 	sctp_clog.x.strlog.n_sseq = sseq;
134 	sctp_clog.x.strlog.e_tsn = 0;
135 	sctp_clog.x.strlog.e_sseq = 0;
136 	sctp_clog.x.strlog.strm = stream;
137 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138 	    SCTP_LOG_EVENT_STRM,
139 	    from,
140 	    sctp_clog.x.misc.log1,
141 	    sctp_clog.x.misc.log2,
142 	    sctp_clog.x.misc.log3,
143 	    sctp_clog.x.misc.log4);
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 void
166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167 {
168 	struct sctp_cwnd_log sctp_clog;
169 
170 	sctp_clog.x.sack.cumack = cumack;
171 	sctp_clog.x.sack.oldcumack = old_cumack;
172 	sctp_clog.x.sack.tsn = tsn;
173 	sctp_clog.x.sack.numGaps = gaps;
174 	sctp_clog.x.sack.numDups = dups;
175 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176 	    SCTP_LOG_EVENT_SACK,
177 	    from,
178 	    sctp_clog.x.misc.log1,
179 	    sctp_clog.x.misc.log2,
180 	    sctp_clog.x.misc.log3,
181 	    sctp_clog.x.misc.log4);
182 }
183 
184 void
185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186 {
187 	struct sctp_cwnd_log sctp_clog;
188 
189 	memset(&sctp_clog, 0, sizeof(sctp_clog));
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
204 {
205 	struct sctp_cwnd_log sctp_clog;
206 
207 	memset(&sctp_clog, 0, sizeof(sctp_clog));
208 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210 	sctp_clog.x.fr.tsn = tsn;
211 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212 	    SCTP_LOG_EVENT_FR,
213 	    from,
214 	    sctp_clog.x.misc.log1,
215 	    sctp_clog.x.misc.log2,
216 	    sctp_clog.x.misc.log3,
217 	    sctp_clog.x.misc.log4);
218 }
219 
220 #ifdef SCTP_MBUF_LOGGING
221 void
222 sctp_log_mb(struct mbuf *m, int from)
223 {
224 	struct sctp_cwnd_log sctp_clog;
225 
226 	sctp_clog.x.mb.mp = m;
227 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
228 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
229 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
230 	if (SCTP_BUF_IS_EXTENDED(m)) {
231 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
232 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
233 	} else {
234 		sctp_clog.x.mb.ext = 0;
235 		sctp_clog.x.mb.refcnt = 0;
236 	}
237 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
238 	    SCTP_LOG_EVENT_MBUF,
239 	    from,
240 	    sctp_clog.x.misc.log1,
241 	    sctp_clog.x.misc.log2,
242 	    sctp_clog.x.misc.log3,
243 	    sctp_clog.x.misc.log4);
244 }
245 
246 void
247 sctp_log_mbc(struct mbuf *m, int from)
248 {
249 	struct mbuf *mat;
250 
251 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
252 		sctp_log_mb(mat, from);
253 	}
254 }
255 
256 #endif
257 
258 void
259 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
260 {
261 	struct sctp_cwnd_log sctp_clog;
262 
263 	if (control == NULL) {
264 		SCTP_PRINTF("Gak log of NULL?\n");
265 		return;
266 	}
267 	sctp_clog.x.strlog.stcb = control->stcb;
268 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
269 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
270 	sctp_clog.x.strlog.strm = control->sinfo_stream;
271 	if (poschk != NULL) {
272 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
273 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
274 	} else {
275 		sctp_clog.x.strlog.e_tsn = 0;
276 		sctp_clog.x.strlog.e_sseq = 0;
277 	}
278 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
279 	    SCTP_LOG_EVENT_STRM,
280 	    from,
281 	    sctp_clog.x.misc.log1,
282 	    sctp_clog.x.misc.log2,
283 	    sctp_clog.x.misc.log3,
284 	    sctp_clog.x.misc.log4);
285 }
286 
287 void
288 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
289 {
290 	struct sctp_cwnd_log sctp_clog;
291 
292 	sctp_clog.x.cwnd.net = net;
293 	if (stcb->asoc.send_queue_cnt > 255)
294 		sctp_clog.x.cwnd.cnt_in_send = 255;
295 	else
296 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
297 	if (stcb->asoc.stream_queue_cnt > 255)
298 		sctp_clog.x.cwnd.cnt_in_str = 255;
299 	else
300 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
301 
302 	if (net) {
303 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
304 		sctp_clog.x.cwnd.inflight = net->flight_size;
305 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
306 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
307 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
308 	}
309 	if (SCTP_CWNDLOG_PRESEND == from) {
310 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
311 	}
312 	sctp_clog.x.cwnd.cwnd_augment = augment;
313 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
314 	    SCTP_LOG_EVENT_CWND,
315 	    from,
316 	    sctp_clog.x.misc.log1,
317 	    sctp_clog.x.misc.log2,
318 	    sctp_clog.x.misc.log3,
319 	    sctp_clog.x.misc.log4);
320 }
321 
322 void
323 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
324 {
325 	struct sctp_cwnd_log sctp_clog;
326 
327 	memset(&sctp_clog, 0, sizeof(sctp_clog));
328 	if (inp) {
329 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
330 
331 	} else {
332 		sctp_clog.x.lock.sock = (void *)NULL;
333 	}
334 	sctp_clog.x.lock.inp = (void *)inp;
335 	if (stcb) {
336 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
337 	} else {
338 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
339 	}
340 	if (inp) {
341 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
342 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
343 	} else {
344 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
345 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
346 	}
347 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
348 	if (inp && (inp->sctp_socket)) {
349 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
350 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
351 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
352 	} else {
353 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
354 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
355 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
356 	}
357 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
358 	    SCTP_LOG_LOCK_EVENT,
359 	    from,
360 	    sctp_clog.x.misc.log1,
361 	    sctp_clog.x.misc.log2,
362 	    sctp_clog.x.misc.log3,
363 	    sctp_clog.x.misc.log4);
364 }
365 
366 void
367 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
368 {
369 	struct sctp_cwnd_log sctp_clog;
370 
371 	memset(&sctp_clog, 0, sizeof(sctp_clog));
372 	sctp_clog.x.cwnd.net = net;
373 	sctp_clog.x.cwnd.cwnd_new_value = error;
374 	sctp_clog.x.cwnd.inflight = net->flight_size;
375 	sctp_clog.x.cwnd.cwnd_augment = burst;
376 	if (stcb->asoc.send_queue_cnt > 255)
377 		sctp_clog.x.cwnd.cnt_in_send = 255;
378 	else
379 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
380 	if (stcb->asoc.stream_queue_cnt > 255)
381 		sctp_clog.x.cwnd.cnt_in_str = 255;
382 	else
383 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
384 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
385 	    SCTP_LOG_EVENT_MAXBURST,
386 	    from,
387 	    sctp_clog.x.misc.log1,
388 	    sctp_clog.x.misc.log2,
389 	    sctp_clog.x.misc.log3,
390 	    sctp_clog.x.misc.log4);
391 }
392 
393 void
394 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
395 {
396 	struct sctp_cwnd_log sctp_clog;
397 
398 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
399 	sctp_clog.x.rwnd.send_size = snd_size;
400 	sctp_clog.x.rwnd.overhead = overhead;
401 	sctp_clog.x.rwnd.new_rwnd = 0;
402 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
403 	    SCTP_LOG_EVENT_RWND,
404 	    from,
405 	    sctp_clog.x.misc.log1,
406 	    sctp_clog.x.misc.log2,
407 	    sctp_clog.x.misc.log3,
408 	    sctp_clog.x.misc.log4);
409 }
410 
411 void
412 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
413 {
414 	struct sctp_cwnd_log sctp_clog;
415 
416 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
417 	sctp_clog.x.rwnd.send_size = flight_size;
418 	sctp_clog.x.rwnd.overhead = overhead;
419 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
420 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
421 	    SCTP_LOG_EVENT_RWND,
422 	    from,
423 	    sctp_clog.x.misc.log1,
424 	    sctp_clog.x.misc.log2,
425 	    sctp_clog.x.misc.log3,
426 	    sctp_clog.x.misc.log4);
427 }
428 
429 #ifdef SCTP_MBCNT_LOGGING
430 static void
431 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
432 {
433 	struct sctp_cwnd_log sctp_clog;
434 
435 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
436 	sctp_clog.x.mbcnt.size_change = book;
437 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
438 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
439 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
440 	    SCTP_LOG_EVENT_MBCNT,
441 	    from,
442 	    sctp_clog.x.misc.log1,
443 	    sctp_clog.x.misc.log2,
444 	    sctp_clog.x.misc.log3,
445 	    sctp_clog.x.misc.log4);
446 }
447 
448 #endif
449 
450 void
451 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
452 {
453 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
454 	    SCTP_LOG_MISC_EVENT,
455 	    from,
456 	    a, b, c, d);
457 }
458 
459 void
460 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
461 {
462 	struct sctp_cwnd_log sctp_clog;
463 
464 	sctp_clog.x.wake.stcb = (void *)stcb;
465 	sctp_clog.x.wake.wake_cnt = wake_cnt;
466 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
467 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
468 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
469 
470 	if (stcb->asoc.stream_queue_cnt < 0xff)
471 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
472 	else
473 		sctp_clog.x.wake.stream_qcnt = 0xff;
474 
475 	if (stcb->asoc.chunks_on_out_queue < 0xff)
476 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
477 	else
478 		sctp_clog.x.wake.chunks_on_oque = 0xff;
479 
480 	sctp_clog.x.wake.sctpflags = 0;
481 	/* set in the defered mode stuff */
482 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
483 		sctp_clog.x.wake.sctpflags |= 1;
484 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
485 		sctp_clog.x.wake.sctpflags |= 2;
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
487 		sctp_clog.x.wake.sctpflags |= 4;
488 	/* what about the sb */
489 	if (stcb->sctp_socket) {
490 		struct socket *so = stcb->sctp_socket;
491 
492 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
493 	} else {
494 		sctp_clog.x.wake.sbflags = 0xff;
495 	}
496 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
497 	    SCTP_LOG_EVENT_WAKE,
498 	    from,
499 	    sctp_clog.x.misc.log1,
500 	    sctp_clog.x.misc.log2,
501 	    sctp_clog.x.misc.log3,
502 	    sctp_clog.x.misc.log4);
503 }
504 
505 void
506 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
507 {
508 	struct sctp_cwnd_log sctp_clog;
509 
510 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
511 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
512 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
513 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
514 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
515 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
516 	sctp_clog.x.blk.sndlen = (uint32_t) sendlen;
517 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
518 	    SCTP_LOG_EVENT_BLOCK,
519 	    from,
520 	    sctp_clog.x.misc.log1,
521 	    sctp_clog.x.misc.log2,
522 	    sctp_clog.x.misc.log3,
523 	    sctp_clog.x.misc.log4);
524 }
525 
526 int
527 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
528 {
529 	/* May need to fix this if ktrdump does not work */
530 	return (0);
531 }
532 
533 #ifdef SCTP_AUDITING_ENABLED
534 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
535 static int sctp_audit_indx = 0;
536 
537 static
538 void
539 sctp_print_audit_report(void)
540 {
541 	int i;
542 	int cnt;
543 
544 	cnt = 0;
545 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
546 		if ((sctp_audit_data[i][0] == 0xe0) &&
547 		    (sctp_audit_data[i][1] == 0x01)) {
548 			cnt = 0;
549 			SCTP_PRINTF("\n");
550 		} else if (sctp_audit_data[i][0] == 0xf0) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
554 		    (sctp_audit_data[i][1] == 0x01)) {
555 			SCTP_PRINTF("\n");
556 			cnt = 0;
557 		}
558 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
559 		    (uint32_t) sctp_audit_data[i][1]);
560 		cnt++;
561 		if ((cnt % 14) == 0)
562 			SCTP_PRINTF("\n");
563 	}
564 	for (i = 0; i < sctp_audit_indx; i++) {
565 		if ((sctp_audit_data[i][0] == 0xe0) &&
566 		    (sctp_audit_data[i][1] == 0x01)) {
567 			cnt = 0;
568 			SCTP_PRINTF("\n");
569 		} else if (sctp_audit_data[i][0] == 0xf0) {
570 			cnt = 0;
571 			SCTP_PRINTF("\n");
572 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
573 		    (sctp_audit_data[i][1] == 0x01)) {
574 			SCTP_PRINTF("\n");
575 			cnt = 0;
576 		}
577 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
578 		    (uint32_t) sctp_audit_data[i][1]);
579 		cnt++;
580 		if ((cnt % 14) == 0)
581 			SCTP_PRINTF("\n");
582 	}
583 	SCTP_PRINTF("\n");
584 }
585 
586 void
587 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
588     struct sctp_nets *net)
589 {
590 	int resend_cnt, tot_out, rep, tot_book_cnt;
591 	struct sctp_nets *lnet;
592 	struct sctp_tmit_chunk *chk;
593 
594 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
595 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
596 	sctp_audit_indx++;
597 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
598 		sctp_audit_indx = 0;
599 	}
600 	if (inp == NULL) {
601 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
602 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
603 		sctp_audit_indx++;
604 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
605 			sctp_audit_indx = 0;
606 		}
607 		return;
608 	}
609 	if (stcb == NULL) {
610 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
611 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
612 		sctp_audit_indx++;
613 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
614 			sctp_audit_indx = 0;
615 		}
616 		return;
617 	}
618 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
619 	sctp_audit_data[sctp_audit_indx][1] =
620 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
621 	sctp_audit_indx++;
622 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
623 		sctp_audit_indx = 0;
624 	}
625 	rep = 0;
626 	tot_book_cnt = 0;
627 	resend_cnt = tot_out = 0;
628 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
629 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
630 			resend_cnt++;
631 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
632 			tot_out += chk->book_size;
633 			tot_book_cnt++;
634 		}
635 	}
636 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
637 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
638 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
639 		sctp_audit_indx++;
640 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
641 			sctp_audit_indx = 0;
642 		}
643 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
644 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
645 		rep = 1;
646 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
647 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
648 		sctp_audit_data[sctp_audit_indx][1] =
649 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
650 		sctp_audit_indx++;
651 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
652 			sctp_audit_indx = 0;
653 		}
654 	}
655 	if (tot_out != stcb->asoc.total_flight) {
656 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
657 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
658 		sctp_audit_indx++;
659 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
660 			sctp_audit_indx = 0;
661 		}
662 		rep = 1;
663 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
664 		    (int)stcb->asoc.total_flight);
665 		stcb->asoc.total_flight = tot_out;
666 	}
667 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
668 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
669 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
670 		sctp_audit_indx++;
671 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
672 			sctp_audit_indx = 0;
673 		}
674 		rep = 1;
675 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
676 
677 		stcb->asoc.total_flight_count = tot_book_cnt;
678 	}
679 	tot_out = 0;
680 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
681 		tot_out += lnet->flight_size;
682 	}
683 	if (tot_out != stcb->asoc.total_flight) {
684 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
685 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
686 		sctp_audit_indx++;
687 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
688 			sctp_audit_indx = 0;
689 		}
690 		rep = 1;
691 		SCTP_PRINTF("real flight:%d net total was %d\n",
692 		    stcb->asoc.total_flight, tot_out);
693 		/* now corrective action */
694 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
695 
696 			tot_out = 0;
697 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
698 				if ((chk->whoTo == lnet) &&
699 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
700 					tot_out += chk->book_size;
701 				}
702 			}
703 			if (lnet->flight_size != tot_out) {
704 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
705 				    (void *)lnet, lnet->flight_size,
706 				    tot_out);
707 				lnet->flight_size = tot_out;
708 			}
709 		}
710 	}
711 	if (rep) {
712 		sctp_print_audit_report();
713 	}
714 }
715 
716 void
717 sctp_audit_log(uint8_t ev, uint8_t fd)
718 {
719 
720 	sctp_audit_data[sctp_audit_indx][0] = ev;
721 	sctp_audit_data[sctp_audit_indx][1] = fd;
722 	sctp_audit_indx++;
723 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
724 		sctp_audit_indx = 0;
725 	}
726 }
727 
728 #endif
729 
730 /*
731  * sctp_stop_timers_for_shutdown() should be called
732  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
733  * state to make sure that all timers are stopped.
734  */
735 void
736 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
737 {
738 	struct sctp_association *asoc;
739 	struct sctp_nets *net;
740 
741 	asoc = &stcb->asoc;
742 
743 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
744 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
745 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
746 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
747 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
748 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
749 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
750 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
751 	}
752 }
753 
754 /*
755  * a list of sizes based on typical mtu's, used only if next hop size not
756  * returned.
757  */
758 static uint32_t sctp_mtu_sizes[] = {
759 	68,
760 	296,
761 	508,
762 	512,
763 	544,
764 	576,
765 	1006,
766 	1492,
767 	1500,
768 	1536,
769 	2002,
770 	2048,
771 	4352,
772 	4464,
773 	8166,
774 	17914,
775 	32000,
776 	65535
777 };
778 
779 /*
780  * Return the largest MTU smaller than val. If there is no
781  * entry, just return val.
782  */
783 uint32_t
784 sctp_get_prev_mtu(uint32_t val)
785 {
786 	uint32_t i;
787 
788 	if (val <= sctp_mtu_sizes[0]) {
789 		return (val);
790 	}
791 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
792 		if (val <= sctp_mtu_sizes[i]) {
793 			break;
794 		}
795 	}
796 	return (sctp_mtu_sizes[i - 1]);
797 }
798 
799 /*
800  * Return the smallest MTU larger than val. If there is no
801  * entry, just return val.
802  */
803 uint32_t
804 sctp_get_next_mtu(uint32_t val)
805 {
806 	/* select another MTU that is just bigger than this one */
807 	uint32_t i;
808 
809 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
810 		if (val < sctp_mtu_sizes[i]) {
811 			return (sctp_mtu_sizes[i]);
812 		}
813 	}
814 	return (val);
815 }
816 
817 void
818 sctp_fill_random_store(struct sctp_pcb *m)
819 {
820 	/*
821 	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
822 	 * our counter. The result becomes our good random numbers and we
823 	 * then setup to give these out. Note that we do no locking to
824 	 * protect this. This is ok, since if competing folks call this we
825 	 * will get more gobbled gook in the random store which is what we
826 	 * want. There is a danger that two guys will use the same random
827 	 * numbers, but thats ok too since that is random as well :->
828 	 */
829 	m->store_at = 0;
830 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
831 	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
832 	    sizeof(m->random_counter), (uint8_t *) m->random_store);
833 	m->random_counter++;
834 }
835 
836 uint32_t
837 sctp_select_initial_TSN(struct sctp_pcb *inp)
838 {
839 	/*
840 	 * A true implementation should use random selection process to get
841 	 * the initial stream sequence number, using RFC1750 as a good
842 	 * guideline
843 	 */
844 	uint32_t x, *xp;
845 	uint8_t *p;
846 	int store_at, new_store;
847 
848 	if (inp->initial_sequence_debug != 0) {
849 		uint32_t ret;
850 
851 		ret = inp->initial_sequence_debug;
852 		inp->initial_sequence_debug++;
853 		return (ret);
854 	}
855 retry:
856 	store_at = inp->store_at;
857 	new_store = store_at + sizeof(uint32_t);
858 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
859 		new_store = 0;
860 	}
861 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
862 		goto retry;
863 	}
864 	if (new_store == 0) {
865 		/* Refill the random store */
866 		sctp_fill_random_store(inp);
867 	}
868 	p = &inp->random_store[store_at];
869 	xp = (uint32_t *) p;
870 	x = *xp;
871 	return (x);
872 }
873 
874 uint32_t
875 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
876 {
877 	uint32_t x;
878 	struct timeval now;
879 
880 	if (check) {
881 		(void)SCTP_GETTIME_TIMEVAL(&now);
882 	}
883 	for (;;) {
884 		x = sctp_select_initial_TSN(&inp->sctp_ep);
885 		if (x == 0) {
886 			/* we never use 0 */
887 			continue;
888 		}
889 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
890 			break;
891 		}
892 	}
893 	return (x);
894 }
895 
896 int32_t
897 sctp_map_assoc_state(int kernel_state)
898 {
899 	int32_t user_state;
900 
901 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
902 		user_state = SCTP_CLOSED;
903 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
904 		user_state = SCTP_SHUTDOWN_PENDING;
905 	} else {
906 		switch (kernel_state & SCTP_STATE_MASK) {
907 		case SCTP_STATE_EMPTY:
908 			user_state = SCTP_CLOSED;
909 			break;
910 		case SCTP_STATE_INUSE:
911 			user_state = SCTP_CLOSED;
912 			break;
913 		case SCTP_STATE_COOKIE_WAIT:
914 			user_state = SCTP_COOKIE_WAIT;
915 			break;
916 		case SCTP_STATE_COOKIE_ECHOED:
917 			user_state = SCTP_COOKIE_ECHOED;
918 			break;
919 		case SCTP_STATE_OPEN:
920 			user_state = SCTP_ESTABLISHED;
921 			break;
922 		case SCTP_STATE_SHUTDOWN_SENT:
923 			user_state = SCTP_SHUTDOWN_SENT;
924 			break;
925 		case SCTP_STATE_SHUTDOWN_RECEIVED:
926 			user_state = SCTP_SHUTDOWN_RECEIVED;
927 			break;
928 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
929 			user_state = SCTP_SHUTDOWN_ACK_SENT;
930 			break;
931 		default:
932 			user_state = SCTP_CLOSED;
933 			break;
934 		}
935 	}
936 	return (user_state);
937 }
938 
939 int
940 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
941     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
942 {
943 	struct sctp_association *asoc;
944 
945 	/*
946 	 * Anything set to zero is taken care of by the allocation routine's
947 	 * bzero
948 	 */
949 
950 	/*
951 	 * Up front select what scoping to apply on addresses I tell my peer
952 	 * Not sure what to do with these right now, we will need to come up
953 	 * with a way to set them. We may need to pass them through from the
954 	 * caller in the sctp_aloc_assoc() function.
955 	 */
956 	int i;
957 
958 #if defined(SCTP_DETAILED_STR_STATS)
959 	int j;
960 
961 #endif
962 
963 	asoc = &stcb->asoc;
964 	/* init all variables to a known value. */
965 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
966 	asoc->max_burst = inp->sctp_ep.max_burst;
967 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
968 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
969 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
970 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
971 	asoc->ecn_supported = inp->ecn_supported;
972 	asoc->prsctp_supported = inp->prsctp_supported;
973 	asoc->idata_supported = inp->idata_supported;
974 	asoc->auth_supported = inp->auth_supported;
975 	asoc->asconf_supported = inp->asconf_supported;
976 	asoc->reconfig_supported = inp->reconfig_supported;
977 	asoc->nrsack_supported = inp->nrsack_supported;
978 	asoc->pktdrop_supported = inp->pktdrop_supported;
979 	asoc->idata_supported = inp->idata_supported;
980 	asoc->sctp_cmt_pf = (uint8_t) 0;
981 	asoc->sctp_frag_point = inp->sctp_frag_point;
982 	asoc->sctp_features = inp->sctp_features;
983 	asoc->default_dscp = inp->sctp_ep.default_dscp;
984 	asoc->max_cwnd = inp->max_cwnd;
985 #ifdef INET6
986 	if (inp->sctp_ep.default_flowlabel) {
987 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
988 	} else {
989 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
990 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
991 			asoc->default_flowlabel &= 0x000fffff;
992 			asoc->default_flowlabel |= 0x80000000;
993 		} else {
994 			asoc->default_flowlabel = 0;
995 		}
996 	}
997 #endif
998 	asoc->sb_send_resv = 0;
999 	if (override_tag) {
1000 		asoc->my_vtag = override_tag;
1001 	} else {
1002 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1003 	}
1004 	/* Get the nonce tags */
1005 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1006 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1007 	asoc->vrf_id = vrf_id;
1008 
1009 #ifdef SCTP_ASOCLOG_OF_TSNS
1010 	asoc->tsn_in_at = 0;
1011 	asoc->tsn_out_at = 0;
1012 	asoc->tsn_in_wrapped = 0;
1013 	asoc->tsn_out_wrapped = 0;
1014 	asoc->cumack_log_at = 0;
1015 	asoc->cumack_log_atsnt = 0;
1016 #endif
1017 #ifdef SCTP_FS_SPEC_LOG
1018 	asoc->fs_index = 0;
1019 #endif
1020 	asoc->refcnt = 0;
1021 	asoc->assoc_up_sent = 0;
1022 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1023 	    sctp_select_initial_TSN(&inp->sctp_ep);
1024 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1025 	/* we are optimisitic here */
1026 	asoc->peer_supports_nat = 0;
1027 	asoc->sent_queue_retran_cnt = 0;
1028 
1029 	/* for CMT */
1030 	asoc->last_net_cmt_send_started = NULL;
1031 
1032 	/* This will need to be adjusted */
1033 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1034 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1035 	asoc->asconf_seq_in = asoc->last_acked_seq;
1036 
1037 	/* here we are different, we hold the next one we expect */
1038 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1039 
1040 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1041 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1042 
1043 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1044 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1045 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1046 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1047 	asoc->free_chunk_cnt = 0;
1048 
1049 	asoc->iam_blocking = 0;
1050 	asoc->context = inp->sctp_context;
1051 	asoc->local_strreset_support = inp->local_strreset_support;
1052 	asoc->def_send = inp->def_send;
1053 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1054 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1055 	asoc->pr_sctp_cnt = 0;
1056 	asoc->total_output_queue_size = 0;
1057 
1058 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1059 		asoc->scope.ipv6_addr_legal = 1;
1060 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1061 			asoc->scope.ipv4_addr_legal = 1;
1062 		} else {
1063 			asoc->scope.ipv4_addr_legal = 0;
1064 		}
1065 	} else {
1066 		asoc->scope.ipv6_addr_legal = 0;
1067 		asoc->scope.ipv4_addr_legal = 1;
1068 	}
1069 
1070 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1071 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1072 
1073 	asoc->smallest_mtu = inp->sctp_frag_point;
1074 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1075 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1076 
1077 	asoc->locked_on_sending = NULL;
1078 	asoc->stream_locked_on = 0;
1079 	asoc->ecn_echo_cnt_onq = 0;
1080 	asoc->stream_locked = 0;
1081 
1082 	asoc->send_sack = 1;
1083 
1084 	LIST_INIT(&asoc->sctp_restricted_addrs);
1085 
1086 	TAILQ_INIT(&asoc->nets);
1087 	TAILQ_INIT(&asoc->pending_reply_queue);
1088 	TAILQ_INIT(&asoc->asconf_ack_sent);
1089 	/* Setup to fill the hb random cache at first HB */
1090 	asoc->hb_random_idx = 4;
1091 
1092 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1093 
1094 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1095 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1096 
1097 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1098 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1099 
1100 	/*
1101 	 * Now the stream parameters, here we allocate space for all streams
1102 	 * that we request by default.
1103 	 */
1104 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1105 	    o_strms;
1106 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1107 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1108 	    SCTP_M_STRMO);
1109 	if (asoc->strmout == NULL) {
1110 		/* big trouble no memory */
1111 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1112 		return (ENOMEM);
1113 	}
1114 	for (i = 0; i < asoc->streamoutcnt; i++) {
1115 		/*
1116 		 * inbound side must be set to 0xffff, also NOTE when we get
1117 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1118 		 * count (streamoutcnt) but first check if we sent to any of
1119 		 * the upper streams that were dropped (if some were). Those
1120 		 * that were dropped must be notified to the upper layer as
1121 		 * failed to send.
1122 		 */
1123 		asoc->strmout[i].next_sequence_send = 0x0;
1124 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1125 		asoc->strmout[i].chunks_on_queues = 0;
1126 #if defined(SCTP_DETAILED_STR_STATS)
1127 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1128 			asoc->strmout[i].abandoned_sent[j] = 0;
1129 			asoc->strmout[i].abandoned_unsent[j] = 0;
1130 		}
1131 #else
1132 		asoc->strmout[i].abandoned_sent[0] = 0;
1133 		asoc->strmout[i].abandoned_unsent[0] = 0;
1134 #endif
1135 		asoc->strmout[i].stream_no = i;
1136 		asoc->strmout[i].last_msg_incomplete = 0;
1137 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1138 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1139 	}
1140 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1141 
1142 	/* Now the mapping array */
1143 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1144 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1145 	    SCTP_M_MAP);
1146 	if (asoc->mapping_array == NULL) {
1147 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1148 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1149 		return (ENOMEM);
1150 	}
1151 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1152 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1153 	    SCTP_M_MAP);
1154 	if (asoc->nr_mapping_array == NULL) {
1155 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1156 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1157 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1158 		return (ENOMEM);
1159 	}
1160 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1161 
1162 	/* Now the init of the other outqueues */
1163 	TAILQ_INIT(&asoc->free_chunks);
1164 	TAILQ_INIT(&asoc->control_send_queue);
1165 	TAILQ_INIT(&asoc->asconf_send_queue);
1166 	TAILQ_INIT(&asoc->send_queue);
1167 	TAILQ_INIT(&asoc->sent_queue);
1168 	TAILQ_INIT(&asoc->resetHead);
1169 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1170 	TAILQ_INIT(&asoc->asconf_queue);
1171 	/* authentication fields */
1172 	asoc->authinfo.random = NULL;
1173 	asoc->authinfo.active_keyid = 0;
1174 	asoc->authinfo.assoc_key = NULL;
1175 	asoc->authinfo.assoc_keyid = 0;
1176 	asoc->authinfo.recv_key = NULL;
1177 	asoc->authinfo.recv_keyid = 0;
1178 	LIST_INIT(&asoc->shared_keys);
1179 	asoc->marked_retrans = 0;
1180 	asoc->port = inp->sctp_ep.port;
1181 	asoc->timoinit = 0;
1182 	asoc->timodata = 0;
1183 	asoc->timosack = 0;
1184 	asoc->timoshutdown = 0;
1185 	asoc->timoheartbeat = 0;
1186 	asoc->timocookie = 0;
1187 	asoc->timoshutdownack = 0;
1188 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1189 	asoc->discontinuity_time = asoc->start_time;
1190 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1191 		asoc->abandoned_unsent[i] = 0;
1192 		asoc->abandoned_sent[i] = 0;
1193 	}
1194 	/*
1195 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1196 	 * freed later when the association is freed.
1197 	 */
1198 	return (0);
1199 }
1200 
1201 void
1202 sctp_print_mapping_array(struct sctp_association *asoc)
1203 {
1204 	unsigned int i, limit;
1205 
1206 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1207 	    asoc->mapping_array_size,
1208 	    asoc->mapping_array_base_tsn,
1209 	    asoc->cumulative_tsn,
1210 	    asoc->highest_tsn_inside_map,
1211 	    asoc->highest_tsn_inside_nr_map);
1212 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1213 		if (asoc->mapping_array[limit - 1] != 0) {
1214 			break;
1215 		}
1216 	}
1217 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1218 	for (i = 0; i < limit; i++) {
1219 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1220 	}
1221 	if (limit % 16)
1222 		SCTP_PRINTF("\n");
1223 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1224 		if (asoc->nr_mapping_array[limit - 1]) {
1225 			break;
1226 		}
1227 	}
1228 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1229 	for (i = 0; i < limit; i++) {
1230 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1231 	}
1232 	if (limit % 16)
1233 		SCTP_PRINTF("\n");
1234 }
1235 
1236 int
1237 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1238 {
1239 	/* mapping array needs to grow */
1240 	uint8_t *new_array1, *new_array2;
1241 	uint32_t new_size;
1242 
1243 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1244 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1245 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1246 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1247 		/* can't get more, forget it */
1248 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1249 		if (new_array1) {
1250 			SCTP_FREE(new_array1, SCTP_M_MAP);
1251 		}
1252 		if (new_array2) {
1253 			SCTP_FREE(new_array2, SCTP_M_MAP);
1254 		}
1255 		return (-1);
1256 	}
1257 	memset(new_array1, 0, new_size);
1258 	memset(new_array2, 0, new_size);
1259 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1260 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1261 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1262 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1263 	asoc->mapping_array = new_array1;
1264 	asoc->nr_mapping_array = new_array2;
1265 	asoc->mapping_array_size = new_size;
1266 	return (0);
1267 }
1268 
1269 
1270 static void
1271 sctp_iterator_work(struct sctp_iterator *it)
1272 {
1273 	int iteration_count = 0;
1274 	int inp_skip = 0;
1275 	int first_in = 1;
1276 	struct sctp_inpcb *tinp;
1277 
1278 	SCTP_INP_INFO_RLOCK();
1279 	SCTP_ITERATOR_LOCK();
1280 	if (it->inp) {
1281 		SCTP_INP_RLOCK(it->inp);
1282 		SCTP_INP_DECR_REF(it->inp);
1283 	}
1284 	if (it->inp == NULL) {
1285 		/* iterator is complete */
1286 done_with_iterator:
1287 		SCTP_ITERATOR_UNLOCK();
1288 		SCTP_INP_INFO_RUNLOCK();
1289 		if (it->function_atend != NULL) {
1290 			(*it->function_atend) (it->pointer, it->val);
1291 		}
1292 		SCTP_FREE(it, SCTP_M_ITER);
1293 		return;
1294 	}
1295 select_a_new_ep:
1296 	if (first_in) {
1297 		first_in = 0;
1298 	} else {
1299 		SCTP_INP_RLOCK(it->inp);
1300 	}
1301 	while (((it->pcb_flags) &&
1302 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1303 	    ((it->pcb_features) &&
1304 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1305 		/* endpoint flags or features don't match, so keep looking */
1306 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1307 			SCTP_INP_RUNLOCK(it->inp);
1308 			goto done_with_iterator;
1309 		}
1310 		tinp = it->inp;
1311 		it->inp = LIST_NEXT(it->inp, sctp_list);
1312 		SCTP_INP_RUNLOCK(tinp);
1313 		if (it->inp == NULL) {
1314 			goto done_with_iterator;
1315 		}
1316 		SCTP_INP_RLOCK(it->inp);
1317 	}
1318 	/* now go through each assoc which is in the desired state */
1319 	if (it->done_current_ep == 0) {
1320 		if (it->function_inp != NULL)
1321 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1322 		it->done_current_ep = 1;
1323 	}
1324 	if (it->stcb == NULL) {
1325 		/* run the per instance function */
1326 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1327 	}
1328 	if ((inp_skip) || it->stcb == NULL) {
1329 		if (it->function_inp_end != NULL) {
1330 			inp_skip = (*it->function_inp_end) (it->inp,
1331 			    it->pointer,
1332 			    it->val);
1333 		}
1334 		SCTP_INP_RUNLOCK(it->inp);
1335 		goto no_stcb;
1336 	}
1337 	while (it->stcb) {
1338 		SCTP_TCB_LOCK(it->stcb);
1339 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1340 			/* not in the right state... keep looking */
1341 			SCTP_TCB_UNLOCK(it->stcb);
1342 			goto next_assoc;
1343 		}
1344 		/* see if we have limited out the iterator loop */
1345 		iteration_count++;
1346 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1347 			/* Pause to let others grab the lock */
1348 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1349 			SCTP_TCB_UNLOCK(it->stcb);
1350 			SCTP_INP_INCR_REF(it->inp);
1351 			SCTP_INP_RUNLOCK(it->inp);
1352 			SCTP_ITERATOR_UNLOCK();
1353 			SCTP_INP_INFO_RUNLOCK();
1354 			SCTP_INP_INFO_RLOCK();
1355 			SCTP_ITERATOR_LOCK();
1356 			if (sctp_it_ctl.iterator_flags) {
1357 				/* We won't be staying here */
1358 				SCTP_INP_DECR_REF(it->inp);
1359 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1360 				if (sctp_it_ctl.iterator_flags &
1361 				    SCTP_ITERATOR_STOP_CUR_IT) {
1362 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1363 					goto done_with_iterator;
1364 				}
1365 				if (sctp_it_ctl.iterator_flags &
1366 				    SCTP_ITERATOR_STOP_CUR_INP) {
1367 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1368 					goto no_stcb;
1369 				}
1370 				/* If we reach here huh? */
1371 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1372 				    sctp_it_ctl.iterator_flags);
1373 				sctp_it_ctl.iterator_flags = 0;
1374 			}
1375 			SCTP_INP_RLOCK(it->inp);
1376 			SCTP_INP_DECR_REF(it->inp);
1377 			SCTP_TCB_LOCK(it->stcb);
1378 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1379 			iteration_count = 0;
1380 		}
1381 		/* run function on this one */
1382 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1383 
1384 		/*
1385 		 * we lie here, it really needs to have its own type but
1386 		 * first I must verify that this won't effect things :-0
1387 		 */
1388 		if (it->no_chunk_output == 0)
1389 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1390 
1391 		SCTP_TCB_UNLOCK(it->stcb);
1392 next_assoc:
1393 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1394 		if (it->stcb == NULL) {
1395 			/* Run last function */
1396 			if (it->function_inp_end != NULL) {
1397 				inp_skip = (*it->function_inp_end) (it->inp,
1398 				    it->pointer,
1399 				    it->val);
1400 			}
1401 		}
1402 	}
1403 	SCTP_INP_RUNLOCK(it->inp);
1404 no_stcb:
1405 	/* done with all assocs on this endpoint, move on to next endpoint */
1406 	it->done_current_ep = 0;
1407 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1408 		it->inp = NULL;
1409 	} else {
1410 		it->inp = LIST_NEXT(it->inp, sctp_list);
1411 	}
1412 	if (it->inp == NULL) {
1413 		goto done_with_iterator;
1414 	}
1415 	goto select_a_new_ep;
1416 }
1417 
1418 void
1419 sctp_iterator_worker(void)
1420 {
1421 	struct sctp_iterator *it, *nit;
1422 
1423 	/* This function is called with the WQ lock in place */
1424 
1425 	sctp_it_ctl.iterator_running = 1;
1426 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1427 		sctp_it_ctl.cur_it = it;
1428 		/* now lets work on this one */
1429 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1430 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1431 		CURVNET_SET(it->vn);
1432 		sctp_iterator_work(it);
1433 		sctp_it_ctl.cur_it = NULL;
1434 		CURVNET_RESTORE();
1435 		SCTP_IPI_ITERATOR_WQ_LOCK();
1436 		/* sa_ignore FREED_MEMORY */
1437 	}
1438 	sctp_it_ctl.iterator_running = 0;
1439 	return;
1440 }
1441 
1442 
1443 static void
1444 sctp_handle_addr_wq(void)
1445 {
1446 	/* deal with the ADDR wq from the rtsock calls */
1447 	struct sctp_laddr *wi, *nwi;
1448 	struct sctp_asconf_iterator *asc;
1449 
1450 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1451 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1452 	if (asc == NULL) {
1453 		/* Try later, no memory */
1454 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1455 		    (struct sctp_inpcb *)NULL,
1456 		    (struct sctp_tcb *)NULL,
1457 		    (struct sctp_nets *)NULL);
1458 		return;
1459 	}
1460 	LIST_INIT(&asc->list_of_work);
1461 	asc->cnt = 0;
1462 
1463 	SCTP_WQ_ADDR_LOCK();
1464 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1465 		LIST_REMOVE(wi, sctp_nxt_addr);
1466 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1467 		asc->cnt++;
1468 	}
1469 	SCTP_WQ_ADDR_UNLOCK();
1470 
1471 	if (asc->cnt == 0) {
1472 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1473 	} else {
1474 		int ret;
1475 
1476 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1477 		    sctp_asconf_iterator_stcb,
1478 		    NULL,	/* No ep end for boundall */
1479 		    SCTP_PCB_FLAGS_BOUNDALL,
1480 		    SCTP_PCB_ANY_FEATURES,
1481 		    SCTP_ASOC_ANY_STATE,
1482 		    (void *)asc, 0,
1483 		    sctp_asconf_iterator_end, NULL, 0);
1484 		if (ret) {
1485 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1486 			/*
1487 			 * Freeing if we are stopping or put back on the
1488 			 * addr_wq.
1489 			 */
1490 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1491 				sctp_asconf_iterator_end(asc, 0);
1492 			} else {
1493 				SCTP_WQ_ADDR_LOCK();
1494 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1495 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1496 				}
1497 				SCTP_WQ_ADDR_UNLOCK();
1498 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1499 			}
1500 		}
1501 	}
1502 }
1503 
1504 void
1505 sctp_timeout_handler(void *t)
1506 {
1507 	struct sctp_inpcb *inp;
1508 	struct sctp_tcb *stcb;
1509 	struct sctp_nets *net;
1510 	struct sctp_timer *tmr;
1511 	struct mbuf *op_err;
1512 
1513 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1514 	struct socket *so;
1515 
1516 #endif
1517 	int did_output;
1518 	int type;
1519 
1520 	tmr = (struct sctp_timer *)t;
1521 	inp = (struct sctp_inpcb *)tmr->ep;
1522 	stcb = (struct sctp_tcb *)tmr->tcb;
1523 	net = (struct sctp_nets *)tmr->net;
1524 	CURVNET_SET((struct vnet *)tmr->vnet);
1525 	did_output = 1;
1526 
1527 #ifdef SCTP_AUDITING_ENABLED
1528 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1529 	sctp_auditing(3, inp, stcb, net);
1530 #endif
1531 
1532 	/* sanity checks... */
1533 	if (tmr->self != (void *)tmr) {
1534 		/*
1535 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1536 		 * (void *)tmr);
1537 		 */
1538 		CURVNET_RESTORE();
1539 		return;
1540 	}
1541 	tmr->stopped_from = 0xa001;
1542 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1543 		/*
1544 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1545 		 * tmr->type);
1546 		 */
1547 		CURVNET_RESTORE();
1548 		return;
1549 	}
1550 	tmr->stopped_from = 0xa002;
1551 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1552 		CURVNET_RESTORE();
1553 		return;
1554 	}
1555 	/* if this is an iterator timeout, get the struct and clear inp */
1556 	tmr->stopped_from = 0xa003;
1557 	if (inp) {
1558 		SCTP_INP_INCR_REF(inp);
1559 		if ((inp->sctp_socket == NULL) &&
1560 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1561 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1562 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1563 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1564 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1565 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1566 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1567 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1568 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1569 		    ) {
1570 			SCTP_INP_DECR_REF(inp);
1571 			CURVNET_RESTORE();
1572 			return;
1573 		}
1574 	}
1575 	tmr->stopped_from = 0xa004;
1576 	if (stcb) {
1577 		atomic_add_int(&stcb->asoc.refcnt, 1);
1578 		if (stcb->asoc.state == 0) {
1579 			atomic_add_int(&stcb->asoc.refcnt, -1);
1580 			if (inp) {
1581 				SCTP_INP_DECR_REF(inp);
1582 			}
1583 			CURVNET_RESTORE();
1584 			return;
1585 		}
1586 	}
1587 	type = tmr->type;
1588 	tmr->stopped_from = 0xa005;
1589 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1590 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1591 		if (inp) {
1592 			SCTP_INP_DECR_REF(inp);
1593 		}
1594 		if (stcb) {
1595 			atomic_add_int(&stcb->asoc.refcnt, -1);
1596 		}
1597 		CURVNET_RESTORE();
1598 		return;
1599 	}
1600 	tmr->stopped_from = 0xa006;
1601 
1602 	if (stcb) {
1603 		SCTP_TCB_LOCK(stcb);
1604 		atomic_add_int(&stcb->asoc.refcnt, -1);
1605 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1606 		    ((stcb->asoc.state == 0) ||
1607 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1608 			SCTP_TCB_UNLOCK(stcb);
1609 			if (inp) {
1610 				SCTP_INP_DECR_REF(inp);
1611 			}
1612 			CURVNET_RESTORE();
1613 			return;
1614 		}
1615 	}
1616 	/* record in stopped what t-o occured */
1617 	tmr->stopped_from = type;
1618 
1619 	/* mark as being serviced now */
1620 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1621 		/*
1622 		 * Callout has been rescheduled.
1623 		 */
1624 		goto get_out;
1625 	}
1626 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1627 		/*
1628 		 * Not active, so no action.
1629 		 */
1630 		goto get_out;
1631 	}
1632 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1633 
1634 	/* call the handler for the appropriate timer type */
1635 	switch (type) {
1636 	case SCTP_TIMER_TYPE_ZERO_COPY:
1637 		if (inp == NULL) {
1638 			break;
1639 		}
1640 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1641 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1642 		}
1643 		break;
1644 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1645 		if (inp == NULL) {
1646 			break;
1647 		}
1648 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1649 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1650 		}
1651 		break;
1652 	case SCTP_TIMER_TYPE_ADDR_WQ:
1653 		sctp_handle_addr_wq();
1654 		break;
1655 	case SCTP_TIMER_TYPE_SEND:
1656 		if ((stcb == NULL) || (inp == NULL)) {
1657 			break;
1658 		}
1659 		SCTP_STAT_INCR(sctps_timodata);
1660 		stcb->asoc.timodata++;
1661 		stcb->asoc.num_send_timers_up--;
1662 		if (stcb->asoc.num_send_timers_up < 0) {
1663 			stcb->asoc.num_send_timers_up = 0;
1664 		}
1665 		SCTP_TCB_LOCK_ASSERT(stcb);
1666 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1667 			/* no need to unlock on tcb its gone */
1668 
1669 			goto out_decr;
1670 		}
1671 		SCTP_TCB_LOCK_ASSERT(stcb);
1672 #ifdef SCTP_AUDITING_ENABLED
1673 		sctp_auditing(4, inp, stcb, net);
1674 #endif
1675 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1676 		if ((stcb->asoc.num_send_timers_up == 0) &&
1677 		    (stcb->asoc.sent_queue_cnt > 0)) {
1678 			struct sctp_tmit_chunk *chk;
1679 
1680 			/*
1681 			 * safeguard. If there on some on the sent queue
1682 			 * somewhere but no timers running something is
1683 			 * wrong... so we start a timer on the first chunk
1684 			 * on the send queue on whatever net it is sent to.
1685 			 */
1686 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1687 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1688 			    chk->whoTo);
1689 		}
1690 		break;
1691 	case SCTP_TIMER_TYPE_INIT:
1692 		if ((stcb == NULL) || (inp == NULL)) {
1693 			break;
1694 		}
1695 		SCTP_STAT_INCR(sctps_timoinit);
1696 		stcb->asoc.timoinit++;
1697 		if (sctp_t1init_timer(inp, stcb, net)) {
1698 			/* no need to unlock on tcb its gone */
1699 			goto out_decr;
1700 		}
1701 		/* We do output but not here */
1702 		did_output = 0;
1703 		break;
1704 	case SCTP_TIMER_TYPE_RECV:
1705 		if ((stcb == NULL) || (inp == NULL)) {
1706 			break;
1707 		}
1708 		SCTP_STAT_INCR(sctps_timosack);
1709 		stcb->asoc.timosack++;
1710 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1711 #ifdef SCTP_AUDITING_ENABLED
1712 		sctp_auditing(4, inp, stcb, net);
1713 #endif
1714 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1715 		break;
1716 	case SCTP_TIMER_TYPE_SHUTDOWN:
1717 		if ((stcb == NULL) || (inp == NULL)) {
1718 			break;
1719 		}
1720 		if (sctp_shutdown_timer(inp, stcb, net)) {
1721 			/* no need to unlock on tcb its gone */
1722 			goto out_decr;
1723 		}
1724 		SCTP_STAT_INCR(sctps_timoshutdown);
1725 		stcb->asoc.timoshutdown++;
1726 #ifdef SCTP_AUDITING_ENABLED
1727 		sctp_auditing(4, inp, stcb, net);
1728 #endif
1729 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1730 		break;
1731 	case SCTP_TIMER_TYPE_HEARTBEAT:
1732 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1733 			break;
1734 		}
1735 		SCTP_STAT_INCR(sctps_timoheartbeat);
1736 		stcb->asoc.timoheartbeat++;
1737 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1738 			/* no need to unlock on tcb its gone */
1739 			goto out_decr;
1740 		}
1741 #ifdef SCTP_AUDITING_ENABLED
1742 		sctp_auditing(4, inp, stcb, net);
1743 #endif
1744 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1745 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1746 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1747 		}
1748 		break;
1749 	case SCTP_TIMER_TYPE_COOKIE:
1750 		if ((stcb == NULL) || (inp == NULL)) {
1751 			break;
1752 		}
1753 		if (sctp_cookie_timer(inp, stcb, net)) {
1754 			/* no need to unlock on tcb its gone */
1755 			goto out_decr;
1756 		}
1757 		SCTP_STAT_INCR(sctps_timocookie);
1758 		stcb->asoc.timocookie++;
1759 #ifdef SCTP_AUDITING_ENABLED
1760 		sctp_auditing(4, inp, stcb, net);
1761 #endif
1762 		/*
1763 		 * We consider T3 and Cookie timer pretty much the same with
1764 		 * respect to where from in chunk_output.
1765 		 */
1766 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1767 		break;
1768 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1769 		{
1770 			struct timeval tv;
1771 			int i, secret;
1772 
1773 			if (inp == NULL) {
1774 				break;
1775 			}
1776 			SCTP_STAT_INCR(sctps_timosecret);
1777 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1778 			SCTP_INP_WLOCK(inp);
1779 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1780 			inp->sctp_ep.last_secret_number =
1781 			    inp->sctp_ep.current_secret_number;
1782 			inp->sctp_ep.current_secret_number++;
1783 			if (inp->sctp_ep.current_secret_number >=
1784 			    SCTP_HOW_MANY_SECRETS) {
1785 				inp->sctp_ep.current_secret_number = 0;
1786 			}
1787 			secret = (int)inp->sctp_ep.current_secret_number;
1788 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1789 				inp->sctp_ep.secret_key[secret][i] =
1790 				    sctp_select_initial_TSN(&inp->sctp_ep);
1791 			}
1792 			SCTP_INP_WUNLOCK(inp);
1793 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1794 		}
1795 		did_output = 0;
1796 		break;
1797 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1798 		if ((stcb == NULL) || (inp == NULL)) {
1799 			break;
1800 		}
1801 		SCTP_STAT_INCR(sctps_timopathmtu);
1802 		sctp_pathmtu_timer(inp, stcb, net);
1803 		did_output = 0;
1804 		break;
1805 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1806 		if ((stcb == NULL) || (inp == NULL)) {
1807 			break;
1808 		}
1809 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1810 			/* no need to unlock on tcb its gone */
1811 			goto out_decr;
1812 		}
1813 		SCTP_STAT_INCR(sctps_timoshutdownack);
1814 		stcb->asoc.timoshutdownack++;
1815 #ifdef SCTP_AUDITING_ENABLED
1816 		sctp_auditing(4, inp, stcb, net);
1817 #endif
1818 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1819 		break;
1820 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1821 		if ((stcb == NULL) || (inp == NULL)) {
1822 			break;
1823 		}
1824 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1825 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1826 		    "Shutdown guard timer expired");
1827 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1828 		/* no need to unlock on tcb its gone */
1829 		goto out_decr;
1830 
1831 	case SCTP_TIMER_TYPE_STRRESET:
1832 		if ((stcb == NULL) || (inp == NULL)) {
1833 			break;
1834 		}
1835 		if (sctp_strreset_timer(inp, stcb, net)) {
1836 			/* no need to unlock on tcb its gone */
1837 			goto out_decr;
1838 		}
1839 		SCTP_STAT_INCR(sctps_timostrmrst);
1840 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1841 		break;
1842 	case SCTP_TIMER_TYPE_ASCONF:
1843 		if ((stcb == NULL) || (inp == NULL)) {
1844 			break;
1845 		}
1846 		if (sctp_asconf_timer(inp, stcb, net)) {
1847 			/* no need to unlock on tcb its gone */
1848 			goto out_decr;
1849 		}
1850 		SCTP_STAT_INCR(sctps_timoasconf);
1851 #ifdef SCTP_AUDITING_ENABLED
1852 		sctp_auditing(4, inp, stcb, net);
1853 #endif
1854 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1855 		break;
1856 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1857 		if ((stcb == NULL) || (inp == NULL)) {
1858 			break;
1859 		}
1860 		sctp_delete_prim_timer(inp, stcb, net);
1861 		SCTP_STAT_INCR(sctps_timodelprim);
1862 		break;
1863 
1864 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1865 		if ((stcb == NULL) || (inp == NULL)) {
1866 			break;
1867 		}
1868 		SCTP_STAT_INCR(sctps_timoautoclose);
1869 		sctp_autoclose_timer(inp, stcb, net);
1870 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1871 		did_output = 0;
1872 		break;
1873 	case SCTP_TIMER_TYPE_ASOCKILL:
1874 		if ((stcb == NULL) || (inp == NULL)) {
1875 			break;
1876 		}
1877 		SCTP_STAT_INCR(sctps_timoassockill);
1878 		/* Can we free it yet? */
1879 		SCTP_INP_DECR_REF(inp);
1880 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1881 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1882 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1883 		so = SCTP_INP_SO(inp);
1884 		atomic_add_int(&stcb->asoc.refcnt, 1);
1885 		SCTP_TCB_UNLOCK(stcb);
1886 		SCTP_SOCKET_LOCK(so, 1);
1887 		SCTP_TCB_LOCK(stcb);
1888 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1889 #endif
1890 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1891 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1892 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1893 		SCTP_SOCKET_UNLOCK(so, 1);
1894 #endif
1895 		/*
1896 		 * free asoc, always unlocks (or destroy's) so prevent
1897 		 * duplicate unlock or unlock of a free mtx :-0
1898 		 */
1899 		stcb = NULL;
1900 		goto out_no_decr;
1901 	case SCTP_TIMER_TYPE_INPKILL:
1902 		SCTP_STAT_INCR(sctps_timoinpkill);
1903 		if (inp == NULL) {
1904 			break;
1905 		}
1906 		/*
1907 		 * special case, take away our increment since WE are the
1908 		 * killer
1909 		 */
1910 		SCTP_INP_DECR_REF(inp);
1911 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1912 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1913 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1914 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1915 		inp = NULL;
1916 		goto out_no_decr;
1917 	default:
1918 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1919 		    type);
1920 		break;
1921 	}
1922 #ifdef SCTP_AUDITING_ENABLED
1923 	sctp_audit_log(0xF1, (uint8_t) type);
1924 	if (inp)
1925 		sctp_auditing(5, inp, stcb, net);
1926 #endif
1927 	if ((did_output) && stcb) {
1928 		/*
1929 		 * Now we need to clean up the control chunk chain if an
1930 		 * ECNE is on it. It must be marked as UNSENT again so next
1931 		 * call will continue to send it until such time that we get
1932 		 * a CWR, to remove it. It is, however, less likely that we
1933 		 * will find a ecn echo on the chain though.
1934 		 */
1935 		sctp_fix_ecn_echo(&stcb->asoc);
1936 	}
1937 get_out:
1938 	if (stcb) {
1939 		SCTP_TCB_UNLOCK(stcb);
1940 	}
1941 out_decr:
1942 	if (inp) {
1943 		SCTP_INP_DECR_REF(inp);
1944 	}
1945 out_no_decr:
1946 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
1947 	CURVNET_RESTORE();
1948 }
1949 
1950 void
1951 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1952     struct sctp_nets *net)
1953 {
1954 	uint32_t to_ticks;
1955 	struct sctp_timer *tmr;
1956 
1957 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1958 		return;
1959 
1960 	tmr = NULL;
1961 	if (stcb) {
1962 		SCTP_TCB_LOCK_ASSERT(stcb);
1963 	}
1964 	switch (t_type) {
1965 	case SCTP_TIMER_TYPE_ZERO_COPY:
1966 		tmr = &inp->sctp_ep.zero_copy_timer;
1967 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1968 		break;
1969 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1970 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1971 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1972 		break;
1973 	case SCTP_TIMER_TYPE_ADDR_WQ:
1974 		/* Only 1 tick away :-) */
1975 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1976 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1977 		break;
1978 	case SCTP_TIMER_TYPE_SEND:
1979 		/* Here we use the RTO timer */
1980 		{
1981 			int rto_val;
1982 
1983 			if ((stcb == NULL) || (net == NULL)) {
1984 				return;
1985 			}
1986 			tmr = &net->rxt_timer;
1987 			if (net->RTO == 0) {
1988 				rto_val = stcb->asoc.initial_rto;
1989 			} else {
1990 				rto_val = net->RTO;
1991 			}
1992 			to_ticks = MSEC_TO_TICKS(rto_val);
1993 		}
1994 		break;
1995 	case SCTP_TIMER_TYPE_INIT:
1996 		/*
1997 		 * Here we use the INIT timer default usually about 1
1998 		 * minute.
1999 		 */
2000 		if ((stcb == NULL) || (net == NULL)) {
2001 			return;
2002 		}
2003 		tmr = &net->rxt_timer;
2004 		if (net->RTO == 0) {
2005 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2006 		} else {
2007 			to_ticks = MSEC_TO_TICKS(net->RTO);
2008 		}
2009 		break;
2010 	case SCTP_TIMER_TYPE_RECV:
2011 		/*
2012 		 * Here we use the Delayed-Ack timer value from the inp
2013 		 * ususually about 200ms.
2014 		 */
2015 		if (stcb == NULL) {
2016 			return;
2017 		}
2018 		tmr = &stcb->asoc.dack_timer;
2019 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2020 		break;
2021 	case SCTP_TIMER_TYPE_SHUTDOWN:
2022 		/* Here we use the RTO of the destination. */
2023 		if ((stcb == NULL) || (net == NULL)) {
2024 			return;
2025 		}
2026 		if (net->RTO == 0) {
2027 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2028 		} else {
2029 			to_ticks = MSEC_TO_TICKS(net->RTO);
2030 		}
2031 		tmr = &net->rxt_timer;
2032 		break;
2033 	case SCTP_TIMER_TYPE_HEARTBEAT:
2034 		/*
2035 		 * the net is used here so that we can add in the RTO. Even
2036 		 * though we use a different timer. We also add the HB timer
2037 		 * PLUS a random jitter.
2038 		 */
2039 		if ((stcb == NULL) || (net == NULL)) {
2040 			return;
2041 		} else {
2042 			uint32_t rndval;
2043 			uint32_t jitter;
2044 
2045 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2046 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2047 				return;
2048 			}
2049 			if (net->RTO == 0) {
2050 				to_ticks = stcb->asoc.initial_rto;
2051 			} else {
2052 				to_ticks = net->RTO;
2053 			}
2054 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2055 			jitter = rndval % to_ticks;
2056 			if (jitter >= (to_ticks >> 1)) {
2057 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2058 			} else {
2059 				to_ticks = to_ticks - jitter;
2060 			}
2061 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2062 			    !(net->dest_state & SCTP_ADDR_PF)) {
2063 				to_ticks += net->heart_beat_delay;
2064 			}
2065 			/*
2066 			 * Now we must convert the to_ticks that are now in
2067 			 * ms to ticks.
2068 			 */
2069 			to_ticks = MSEC_TO_TICKS(to_ticks);
2070 			tmr = &net->hb_timer;
2071 		}
2072 		break;
2073 	case SCTP_TIMER_TYPE_COOKIE:
2074 		/*
2075 		 * Here we can use the RTO timer from the network since one
2076 		 * RTT was compelete. If a retran happened then we will be
2077 		 * using the RTO initial value.
2078 		 */
2079 		if ((stcb == NULL) || (net == NULL)) {
2080 			return;
2081 		}
2082 		if (net->RTO == 0) {
2083 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2084 		} else {
2085 			to_ticks = MSEC_TO_TICKS(net->RTO);
2086 		}
2087 		tmr = &net->rxt_timer;
2088 		break;
2089 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2090 		/*
2091 		 * nothing needed but the endpoint here ususually about 60
2092 		 * minutes.
2093 		 */
2094 		tmr = &inp->sctp_ep.signature_change;
2095 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2096 		break;
2097 	case SCTP_TIMER_TYPE_ASOCKILL:
2098 		if (stcb == NULL) {
2099 			return;
2100 		}
2101 		tmr = &stcb->asoc.strreset_timer;
2102 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2103 		break;
2104 	case SCTP_TIMER_TYPE_INPKILL:
2105 		/*
2106 		 * The inp is setup to die. We re-use the signature_chage
2107 		 * timer since that has stopped and we are in the GONE
2108 		 * state.
2109 		 */
2110 		tmr = &inp->sctp_ep.signature_change;
2111 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2112 		break;
2113 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2114 		/*
2115 		 * Here we use the value found in the EP for PMTU ususually
2116 		 * about 10 minutes.
2117 		 */
2118 		if ((stcb == NULL) || (net == NULL)) {
2119 			return;
2120 		}
2121 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2122 			return;
2123 		}
2124 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2125 		tmr = &net->pmtu_timer;
2126 		break;
2127 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2128 		/* Here we use the RTO of the destination */
2129 		if ((stcb == NULL) || (net == NULL)) {
2130 			return;
2131 		}
2132 		if (net->RTO == 0) {
2133 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2134 		} else {
2135 			to_ticks = MSEC_TO_TICKS(net->RTO);
2136 		}
2137 		tmr = &net->rxt_timer;
2138 		break;
2139 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2140 		/*
2141 		 * Here we use the endpoints shutdown guard timer usually
2142 		 * about 3 minutes.
2143 		 */
2144 		if (stcb == NULL) {
2145 			return;
2146 		}
2147 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2148 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2149 		} else {
2150 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2151 		}
2152 		tmr = &stcb->asoc.shut_guard_timer;
2153 		break;
2154 	case SCTP_TIMER_TYPE_STRRESET:
2155 		/*
2156 		 * Here the timer comes from the stcb but its value is from
2157 		 * the net's RTO.
2158 		 */
2159 		if ((stcb == NULL) || (net == NULL)) {
2160 			return;
2161 		}
2162 		if (net->RTO == 0) {
2163 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2164 		} else {
2165 			to_ticks = MSEC_TO_TICKS(net->RTO);
2166 		}
2167 		tmr = &stcb->asoc.strreset_timer;
2168 		break;
2169 	case SCTP_TIMER_TYPE_ASCONF:
2170 		/*
2171 		 * Here the timer comes from the stcb but its value is from
2172 		 * the net's RTO.
2173 		 */
2174 		if ((stcb == NULL) || (net == NULL)) {
2175 			return;
2176 		}
2177 		if (net->RTO == 0) {
2178 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2179 		} else {
2180 			to_ticks = MSEC_TO_TICKS(net->RTO);
2181 		}
2182 		tmr = &stcb->asoc.asconf_timer;
2183 		break;
2184 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2185 		if ((stcb == NULL) || (net != NULL)) {
2186 			return;
2187 		}
2188 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2189 		tmr = &stcb->asoc.delete_prim_timer;
2190 		break;
2191 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2192 		if (stcb == NULL) {
2193 			return;
2194 		}
2195 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2196 			/*
2197 			 * Really an error since stcb is NOT set to
2198 			 * autoclose
2199 			 */
2200 			return;
2201 		}
2202 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2203 		tmr = &stcb->asoc.autoclose_timer;
2204 		break;
2205 	default:
2206 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2207 		    __func__, t_type);
2208 		return;
2209 		break;
2210 	}
2211 	if ((to_ticks <= 0) || (tmr == NULL)) {
2212 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2213 		    __func__, t_type, to_ticks, (void *)tmr);
2214 		return;
2215 	}
2216 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2217 		/*
2218 		 * we do NOT allow you to have it already running. if it is
2219 		 * we leave the current one up unchanged
2220 		 */
2221 		return;
2222 	}
2223 	/* At this point we can proceed */
2224 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2225 		stcb->asoc.num_send_timers_up++;
2226 	}
2227 	tmr->stopped_from = 0;
2228 	tmr->type = t_type;
2229 	tmr->ep = (void *)inp;
2230 	tmr->tcb = (void *)stcb;
2231 	tmr->net = (void *)net;
2232 	tmr->self = (void *)tmr;
2233 	tmr->vnet = (void *)curvnet;
2234 	tmr->ticks = sctp_get_tick_count();
2235 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2236 	return;
2237 }
2238 
2239 void
2240 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2241     struct sctp_nets *net, uint32_t from)
2242 {
2243 	struct sctp_timer *tmr;
2244 
2245 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2246 	    (inp == NULL))
2247 		return;
2248 
2249 	tmr = NULL;
2250 	if (stcb) {
2251 		SCTP_TCB_LOCK_ASSERT(stcb);
2252 	}
2253 	switch (t_type) {
2254 	case SCTP_TIMER_TYPE_ZERO_COPY:
2255 		tmr = &inp->sctp_ep.zero_copy_timer;
2256 		break;
2257 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2258 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2259 		break;
2260 	case SCTP_TIMER_TYPE_ADDR_WQ:
2261 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2262 		break;
2263 	case SCTP_TIMER_TYPE_SEND:
2264 		if ((stcb == NULL) || (net == NULL)) {
2265 			return;
2266 		}
2267 		tmr = &net->rxt_timer;
2268 		break;
2269 	case SCTP_TIMER_TYPE_INIT:
2270 		if ((stcb == NULL) || (net == NULL)) {
2271 			return;
2272 		}
2273 		tmr = &net->rxt_timer;
2274 		break;
2275 	case SCTP_TIMER_TYPE_RECV:
2276 		if (stcb == NULL) {
2277 			return;
2278 		}
2279 		tmr = &stcb->asoc.dack_timer;
2280 		break;
2281 	case SCTP_TIMER_TYPE_SHUTDOWN:
2282 		if ((stcb == NULL) || (net == NULL)) {
2283 			return;
2284 		}
2285 		tmr = &net->rxt_timer;
2286 		break;
2287 	case SCTP_TIMER_TYPE_HEARTBEAT:
2288 		if ((stcb == NULL) || (net == NULL)) {
2289 			return;
2290 		}
2291 		tmr = &net->hb_timer;
2292 		break;
2293 	case SCTP_TIMER_TYPE_COOKIE:
2294 		if ((stcb == NULL) || (net == NULL)) {
2295 			return;
2296 		}
2297 		tmr = &net->rxt_timer;
2298 		break;
2299 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2300 		/* nothing needed but the endpoint here */
2301 		tmr = &inp->sctp_ep.signature_change;
2302 		/*
2303 		 * We re-use the newcookie timer for the INP kill timer. We
2304 		 * must assure that we do not kill it by accident.
2305 		 */
2306 		break;
2307 	case SCTP_TIMER_TYPE_ASOCKILL:
2308 		/*
2309 		 * Stop the asoc kill timer.
2310 		 */
2311 		if (stcb == NULL) {
2312 			return;
2313 		}
2314 		tmr = &stcb->asoc.strreset_timer;
2315 		break;
2316 
2317 	case SCTP_TIMER_TYPE_INPKILL:
2318 		/*
2319 		 * The inp is setup to die. We re-use the signature_chage
2320 		 * timer since that has stopped and we are in the GONE
2321 		 * state.
2322 		 */
2323 		tmr = &inp->sctp_ep.signature_change;
2324 		break;
2325 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2326 		if ((stcb == NULL) || (net == NULL)) {
2327 			return;
2328 		}
2329 		tmr = &net->pmtu_timer;
2330 		break;
2331 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2332 		if ((stcb == NULL) || (net == NULL)) {
2333 			return;
2334 		}
2335 		tmr = &net->rxt_timer;
2336 		break;
2337 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2338 		if (stcb == NULL) {
2339 			return;
2340 		}
2341 		tmr = &stcb->asoc.shut_guard_timer;
2342 		break;
2343 	case SCTP_TIMER_TYPE_STRRESET:
2344 		if (stcb == NULL) {
2345 			return;
2346 		}
2347 		tmr = &stcb->asoc.strreset_timer;
2348 		break;
2349 	case SCTP_TIMER_TYPE_ASCONF:
2350 		if (stcb == NULL) {
2351 			return;
2352 		}
2353 		tmr = &stcb->asoc.asconf_timer;
2354 		break;
2355 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2356 		if (stcb == NULL) {
2357 			return;
2358 		}
2359 		tmr = &stcb->asoc.delete_prim_timer;
2360 		break;
2361 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2362 		if (stcb == NULL) {
2363 			return;
2364 		}
2365 		tmr = &stcb->asoc.autoclose_timer;
2366 		break;
2367 	default:
2368 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2369 		    __func__, t_type);
2370 		break;
2371 	}
2372 	if (tmr == NULL) {
2373 		return;
2374 	}
2375 	if ((tmr->type != t_type) && tmr->type) {
2376 		/*
2377 		 * Ok we have a timer that is under joint use. Cookie timer
2378 		 * per chance with the SEND timer. We therefore are NOT
2379 		 * running the timer that the caller wants stopped.  So just
2380 		 * return.
2381 		 */
2382 		return;
2383 	}
2384 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2385 		stcb->asoc.num_send_timers_up--;
2386 		if (stcb->asoc.num_send_timers_up < 0) {
2387 			stcb->asoc.num_send_timers_up = 0;
2388 		}
2389 	}
2390 	tmr->self = NULL;
2391 	tmr->stopped_from = from;
2392 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2393 	return;
2394 }
2395 
2396 uint32_t
2397 sctp_calculate_len(struct mbuf *m)
2398 {
2399 	uint32_t tlen = 0;
2400 	struct mbuf *at;
2401 
2402 	at = m;
2403 	while (at) {
2404 		tlen += SCTP_BUF_LEN(at);
2405 		at = SCTP_BUF_NEXT(at);
2406 	}
2407 	return (tlen);
2408 }
2409 
2410 void
2411 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2412     struct sctp_association *asoc, uint32_t mtu)
2413 {
2414 	/*
2415 	 * Reset the P-MTU size on this association, this involves changing
2416 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2417 	 * allow the DF flag to be cleared.
2418 	 */
2419 	struct sctp_tmit_chunk *chk;
2420 	unsigned int eff_mtu, ovh;
2421 
2422 	asoc->smallest_mtu = mtu;
2423 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2424 		ovh = SCTP_MIN_OVERHEAD;
2425 	} else {
2426 		ovh = SCTP_MIN_V4_OVERHEAD;
2427 	}
2428 	eff_mtu = mtu - ovh;
2429 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2430 		if (chk->send_size > eff_mtu) {
2431 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2432 		}
2433 	}
2434 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2435 		if (chk->send_size > eff_mtu) {
2436 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2437 		}
2438 	}
2439 }
2440 
2441 
2442 /*
2443  * given an association and starting time of the current RTT period return
2444  * RTO in number of msecs net should point to the current network
2445  */
2446 
2447 uint32_t
2448 sctp_calculate_rto(struct sctp_tcb *stcb,
2449     struct sctp_association *asoc,
2450     struct sctp_nets *net,
2451     struct timeval *told,
2452     int safe, int rtt_from_sack)
2453 {
2454 	/*-
2455 	 * given an association and the starting time of the current RTT
2456 	 * period (in value1/value2) return RTO in number of msecs.
2457 	 */
2458 	int32_t rtt;		/* RTT in ms */
2459 	uint32_t new_rto;
2460 	int first_measure = 0;
2461 	struct timeval now, then, *old;
2462 
2463 	/* Copy it out for sparc64 */
2464 	if (safe == sctp_align_unsafe_makecopy) {
2465 		old = &then;
2466 		memcpy(&then, told, sizeof(struct timeval));
2467 	} else if (safe == sctp_align_safe_nocopy) {
2468 		old = told;
2469 	} else {
2470 		/* error */
2471 		SCTP_PRINTF("Huh, bad rto calc call\n");
2472 		return (0);
2473 	}
2474 	/************************/
2475 	/* 1. calculate new RTT */
2476 	/************************/
2477 	/* get the current time */
2478 	if (stcb->asoc.use_precise_time) {
2479 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2480 	} else {
2481 		(void)SCTP_GETTIME_TIMEVAL(&now);
2482 	}
2483 	timevalsub(&now, old);
2484 	/* store the current RTT in us */
2485 	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
2486 	        (uint64_t) now.tv_usec;
2487 
2488 	/* compute rtt in ms */
2489 	rtt = (int32_t) (net->rtt / 1000);
2490 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2491 		/*
2492 		 * Tell the CC module that a new update has just occurred
2493 		 * from a sack
2494 		 */
2495 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2496 	}
2497 	/*
2498 	 * Do we need to determine the lan? We do this only on sacks i.e.
2499 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2500 	 */
2501 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2502 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2503 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2504 			net->lan_type = SCTP_LAN_INTERNET;
2505 		} else {
2506 			net->lan_type = SCTP_LAN_LOCAL;
2507 		}
2508 	}
2509 	/***************************/
2510 	/* 2. update RTTVAR & SRTT */
2511 	/***************************/
2512 	/*-
2513 	 * Compute the scaled average lastsa and the
2514 	 * scaled variance lastsv as described in van Jacobson
2515 	 * Paper "Congestion Avoidance and Control", Annex A.
2516 	 *
2517 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2518 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2519 	 */
2520 	if (net->RTO_measured) {
2521 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2522 		net->lastsa += rtt;
2523 		if (rtt < 0) {
2524 			rtt = -rtt;
2525 		}
2526 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2527 		net->lastsv += rtt;
2528 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2529 			rto_logging(net, SCTP_LOG_RTTVAR);
2530 		}
2531 	} else {
2532 		/* First RTO measurment */
2533 		net->RTO_measured = 1;
2534 		first_measure = 1;
2535 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2536 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2537 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2538 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2539 		}
2540 	}
2541 	if (net->lastsv == 0) {
2542 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2543 	}
2544 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2545 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2546 	    (stcb->asoc.sat_network_lockout == 0)) {
2547 		stcb->asoc.sat_network = 1;
2548 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2549 		stcb->asoc.sat_network = 0;
2550 		stcb->asoc.sat_network_lockout = 1;
2551 	}
2552 	/* bound it, per C6/C7 in Section 5.3.1 */
2553 	if (new_rto < stcb->asoc.minrto) {
2554 		new_rto = stcb->asoc.minrto;
2555 	}
2556 	if (new_rto > stcb->asoc.maxrto) {
2557 		new_rto = stcb->asoc.maxrto;
2558 	}
2559 	/* we are now returning the RTO */
2560 	return (new_rto);
2561 }
2562 
2563 /*
2564  * return a pointer to a contiguous piece of data from the given mbuf chain
2565  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2566  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2567  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2568  */
2569 caddr_t
2570 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2571 {
2572 	uint32_t count;
2573 	uint8_t *ptr;
2574 
2575 	ptr = in_ptr;
2576 	if ((off < 0) || (len <= 0))
2577 		return (NULL);
2578 
2579 	/* find the desired start location */
2580 	while ((m != NULL) && (off > 0)) {
2581 		if (off < SCTP_BUF_LEN(m))
2582 			break;
2583 		off -= SCTP_BUF_LEN(m);
2584 		m = SCTP_BUF_NEXT(m);
2585 	}
2586 	if (m == NULL)
2587 		return (NULL);
2588 
2589 	/* is the current mbuf large enough (eg. contiguous)? */
2590 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2591 		return (mtod(m, caddr_t)+off);
2592 	} else {
2593 		/* else, it spans more than one mbuf, so save a temp copy... */
2594 		while ((m != NULL) && (len > 0)) {
2595 			count = min(SCTP_BUF_LEN(m) - off, len);
2596 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2597 			len -= count;
2598 			ptr += count;
2599 			off = 0;
2600 			m = SCTP_BUF_NEXT(m);
2601 		}
2602 		if ((m == NULL) && (len > 0))
2603 			return (NULL);
2604 		else
2605 			return ((caddr_t)in_ptr);
2606 	}
2607 }
2608 
2609 
2610 
2611 struct sctp_paramhdr *
2612 sctp_get_next_param(struct mbuf *m,
2613     int offset,
2614     struct sctp_paramhdr *pull,
2615     int pull_limit)
2616 {
2617 	/* This just provides a typed signature to Peter's Pull routine */
2618 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2619 	    (uint8_t *) pull));
2620 }
2621 
2622 
2623 struct mbuf *
2624 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2625 {
2626 	struct mbuf *m_last;
2627 	caddr_t dp;
2628 
2629 	if (padlen > 3) {
2630 		return (NULL);
2631 	}
2632 	if (padlen <= M_TRAILINGSPACE(m)) {
2633 		/*
2634 		 * The easy way. We hope the majority of the time we hit
2635 		 * here :)
2636 		 */
2637 		m_last = m;
2638 	} else {
2639 		/* Hard way we must grow the mbuf chain */
2640 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2641 		if (m_last == NULL) {
2642 			return (NULL);
2643 		}
2644 		SCTP_BUF_LEN(m_last) = 0;
2645 		SCTP_BUF_NEXT(m_last) = NULL;
2646 		SCTP_BUF_NEXT(m) = m_last;
2647 	}
2648 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2649 	SCTP_BUF_LEN(m_last) += padlen;
2650 	memset(dp, 0, padlen);
2651 	return (m_last);
2652 }
2653 
2654 struct mbuf *
2655 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2656 {
2657 	/* find the last mbuf in chain and pad it */
2658 	struct mbuf *m_at;
2659 
2660 	if (last_mbuf != NULL) {
2661 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2662 	} else {
2663 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2664 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2665 				return (sctp_add_pad_tombuf(m_at, padval));
2666 			}
2667 		}
2668 	}
2669 	return (NULL);
2670 }
2671 
2672 static void
2673 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2674     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2675 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2676     SCTP_UNUSED
2677 #endif
2678 )
2679 {
2680 	struct mbuf *m_notify;
2681 	struct sctp_assoc_change *sac;
2682 	struct sctp_queued_to_read *control;
2683 	unsigned int notif_len;
2684 	uint16_t abort_len;
2685 	unsigned int i;
2686 
2687 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2688 	struct socket *so;
2689 
2690 #endif
2691 
2692 	if (stcb == NULL) {
2693 		return;
2694 	}
2695 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2696 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2697 		if (abort != NULL) {
2698 			abort_len = ntohs(abort->ch.chunk_length);
2699 		} else {
2700 			abort_len = 0;
2701 		}
2702 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2703 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2704 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2705 			notif_len += abort_len;
2706 		}
2707 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2708 		if (m_notify == NULL) {
2709 			/* Retry with smaller value. */
2710 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2711 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2712 			if (m_notify == NULL) {
2713 				goto set_error;
2714 			}
2715 		}
2716 		SCTP_BUF_NEXT(m_notify) = NULL;
2717 		sac = mtod(m_notify, struct sctp_assoc_change *);
2718 		memset(sac, 0, notif_len);
2719 		sac->sac_type = SCTP_ASSOC_CHANGE;
2720 		sac->sac_flags = 0;
2721 		sac->sac_length = sizeof(struct sctp_assoc_change);
2722 		sac->sac_state = state;
2723 		sac->sac_error = error;
2724 		/* XXX verify these stream counts */
2725 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2726 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2727 		sac->sac_assoc_id = sctp_get_associd(stcb);
2728 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2729 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2730 				i = 0;
2731 				if (stcb->asoc.prsctp_supported == 1) {
2732 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2733 				}
2734 				if (stcb->asoc.auth_supported == 1) {
2735 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2736 				}
2737 				if (stcb->asoc.asconf_supported == 1) {
2738 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2739 				}
2740 				if (stcb->asoc.idata_supported == 1) {
2741 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2742 				}
2743 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2744 				if (stcb->asoc.reconfig_supported == 1) {
2745 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2746 				}
2747 				sac->sac_length += i;
2748 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2749 				memcpy(sac->sac_info, abort, abort_len);
2750 				sac->sac_length += abort_len;
2751 			}
2752 		}
2753 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2754 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2755 		    0, 0, stcb->asoc.context, 0, 0, 0,
2756 		    m_notify);
2757 		if (control != NULL) {
2758 			control->length = SCTP_BUF_LEN(m_notify);
2759 			/* not that we need this */
2760 			control->tail_mbuf = m_notify;
2761 			control->spec_flags = M_NOTIFICATION;
2762 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2763 			    control,
2764 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2765 			    so_locked);
2766 		} else {
2767 			sctp_m_freem(m_notify);
2768 		}
2769 	}
2770 	/*
2771 	 * For 1-to-1 style sockets, we send up and error when an ABORT
2772 	 * comes in.
2773 	 */
2774 set_error:
2775 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2776 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2777 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2778 		SOCK_LOCK(stcb->sctp_socket);
2779 		if (from_peer) {
2780 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2781 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2782 				stcb->sctp_socket->so_error = ECONNREFUSED;
2783 			} else {
2784 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2785 				stcb->sctp_socket->so_error = ECONNRESET;
2786 			}
2787 		} else {
2788 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2789 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2790 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2791 				stcb->sctp_socket->so_error = ETIMEDOUT;
2792 			} else {
2793 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2794 				stcb->sctp_socket->so_error = ECONNABORTED;
2795 			}
2796 		}
2797 	}
2798 	/* Wake ANY sleepers */
2799 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2800 	so = SCTP_INP_SO(stcb->sctp_ep);
2801 	if (!so_locked) {
2802 		atomic_add_int(&stcb->asoc.refcnt, 1);
2803 		SCTP_TCB_UNLOCK(stcb);
2804 		SCTP_SOCKET_LOCK(so, 1);
2805 		SCTP_TCB_LOCK(stcb);
2806 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2807 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2808 			SCTP_SOCKET_UNLOCK(so, 1);
2809 			return;
2810 		}
2811 	}
2812 #endif
2813 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2814 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2815 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2816 		socantrcvmore_locked(stcb->sctp_socket);
2817 	}
2818 	sorwakeup(stcb->sctp_socket);
2819 	sowwakeup(stcb->sctp_socket);
2820 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2821 	if (!so_locked) {
2822 		SCTP_SOCKET_UNLOCK(so, 1);
2823 	}
2824 #endif
2825 }
2826 
2827 static void
2828 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2829     struct sockaddr *sa, uint32_t error, int so_locked
2830 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2831     SCTP_UNUSED
2832 #endif
2833 )
2834 {
2835 	struct mbuf *m_notify;
2836 	struct sctp_paddr_change *spc;
2837 	struct sctp_queued_to_read *control;
2838 
2839 	if ((stcb == NULL) ||
2840 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2841 		/* event not enabled */
2842 		return;
2843 	}
2844 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2845 	if (m_notify == NULL)
2846 		return;
2847 	SCTP_BUF_LEN(m_notify) = 0;
2848 	spc = mtod(m_notify, struct sctp_paddr_change *);
2849 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2850 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2851 	spc->spc_flags = 0;
2852 	spc->spc_length = sizeof(struct sctp_paddr_change);
2853 	switch (sa->sa_family) {
2854 #ifdef INET
2855 	case AF_INET:
2856 #ifdef INET6
2857 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2858 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2859 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2860 		} else {
2861 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2862 		}
2863 #else
2864 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2865 #endif
2866 		break;
2867 #endif
2868 #ifdef INET6
2869 	case AF_INET6:
2870 		{
2871 			struct sockaddr_in6 *sin6;
2872 
2873 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2874 
2875 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2876 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2877 				if (sin6->sin6_scope_id == 0) {
2878 					/* recover scope_id for user */
2879 					(void)sa6_recoverscope(sin6);
2880 				} else {
2881 					/* clear embedded scope_id for user */
2882 					in6_clearscope(&sin6->sin6_addr);
2883 				}
2884 			}
2885 			break;
2886 		}
2887 #endif
2888 	default:
2889 		/* TSNH */
2890 		break;
2891 	}
2892 	spc->spc_state = state;
2893 	spc->spc_error = error;
2894 	spc->spc_assoc_id = sctp_get_associd(stcb);
2895 
2896 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2897 	SCTP_BUF_NEXT(m_notify) = NULL;
2898 
2899 	/* append to socket */
2900 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2901 	    0, 0, stcb->asoc.context, 0, 0, 0,
2902 	    m_notify);
2903 	if (control == NULL) {
2904 		/* no memory */
2905 		sctp_m_freem(m_notify);
2906 		return;
2907 	}
2908 	control->length = SCTP_BUF_LEN(m_notify);
2909 	control->spec_flags = M_NOTIFICATION;
2910 	/* not that we need this */
2911 	control->tail_mbuf = m_notify;
2912 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2913 	    control,
2914 	    &stcb->sctp_socket->so_rcv, 1,
2915 	    SCTP_READ_LOCK_NOT_HELD,
2916 	    so_locked);
2917 }
2918 
2919 
2920 static void
2921 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2922     struct sctp_tmit_chunk *chk, int so_locked
2923 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2924     SCTP_UNUSED
2925 #endif
2926 )
2927 {
2928 	struct mbuf *m_notify;
2929 	struct sctp_send_failed *ssf;
2930 	struct sctp_send_failed_event *ssfe;
2931 	struct sctp_queued_to_read *control;
2932 	int length;
2933 
2934 	if ((stcb == NULL) ||
2935 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2936 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2937 		/* event not enabled */
2938 		return;
2939 	}
2940 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2941 		length = sizeof(struct sctp_send_failed_event);
2942 	} else {
2943 		length = sizeof(struct sctp_send_failed);
2944 	}
2945 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
2946 	if (m_notify == NULL)
2947 		/* no space left */
2948 		return;
2949 	SCTP_BUF_LEN(m_notify) = 0;
2950 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2951 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2952 		memset(ssfe, 0, length);
2953 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2954 		if (sent) {
2955 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2956 		} else {
2957 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2958 		}
2959 		length += chk->send_size;
2960 		length -= sizeof(struct sctp_data_chunk);
2961 		ssfe->ssfe_length = length;
2962 		ssfe->ssfe_error = error;
2963 		/* not exactly what the user sent in, but should be close :) */
2964 		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
2965 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2966 		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
2967 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2968 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2969 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2970 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
2971 	} else {
2972 		ssf = mtod(m_notify, struct sctp_send_failed *);
2973 		memset(ssf, 0, length);
2974 		ssf->ssf_type = SCTP_SEND_FAILED;
2975 		if (sent) {
2976 			ssf->ssf_flags = SCTP_DATA_SENT;
2977 		} else {
2978 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2979 		}
2980 		length += chk->send_size;
2981 		length -= sizeof(struct sctp_data_chunk);
2982 		ssf->ssf_length = length;
2983 		ssf->ssf_error = error;
2984 		/* not exactly what the user sent in, but should be close :) */
2985 		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
2986 		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2987 		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2988 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2989 		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2990 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
2991 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2992 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
2993 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2994 	}
2995 	if (chk->data) {
2996 		/*
2997 		 * trim off the sctp chunk header(it should be there)
2998 		 */
2999 		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3000 			m_adj(chk->data, sizeof(struct sctp_data_chunk));
3001 			sctp_mbuf_crush(chk->data);
3002 			chk->send_size -= sizeof(struct sctp_data_chunk);
3003 		}
3004 	}
3005 	SCTP_BUF_NEXT(m_notify) = chk->data;
3006 	/* Steal off the mbuf */
3007 	chk->data = NULL;
3008 	/*
3009 	 * For this case, we check the actual socket buffer, since the assoc
3010 	 * is going away we don't want to overfill the socket buffer for a
3011 	 * non-reader
3012 	 */
3013 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3014 		sctp_m_freem(m_notify);
3015 		return;
3016 	}
3017 	/* append to socket */
3018 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3019 	    0, 0, stcb->asoc.context, 0, 0, 0,
3020 	    m_notify);
3021 	if (control == NULL) {
3022 		/* no memory */
3023 		sctp_m_freem(m_notify);
3024 		return;
3025 	}
3026 	control->spec_flags = M_NOTIFICATION;
3027 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3028 	    control,
3029 	    &stcb->sctp_socket->so_rcv, 1,
3030 	    SCTP_READ_LOCK_NOT_HELD,
3031 	    so_locked);
3032 }
3033 
3034 
3035 static void
3036 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3037     struct sctp_stream_queue_pending *sp, int so_locked
3038 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3039     SCTP_UNUSED
3040 #endif
3041 )
3042 {
3043 	struct mbuf *m_notify;
3044 	struct sctp_send_failed *ssf;
3045 	struct sctp_send_failed_event *ssfe;
3046 	struct sctp_queued_to_read *control;
3047 	int length;
3048 
3049 	if ((stcb == NULL) ||
3050 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3051 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3052 		/* event not enabled */
3053 		return;
3054 	}
3055 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3056 		length = sizeof(struct sctp_send_failed_event);
3057 	} else {
3058 		length = sizeof(struct sctp_send_failed);
3059 	}
3060 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
3061 	if (m_notify == NULL) {
3062 		/* no space left */
3063 		return;
3064 	}
3065 	SCTP_BUF_LEN(m_notify) = 0;
3066 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3067 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3068 		memset(ssfe, 0, length);
3069 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3070 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3071 		length += sp->length;
3072 		ssfe->ssfe_length = length;
3073 		ssfe->ssfe_error = error;
3074 		/* not exactly what the user sent in, but should be close :) */
3075 		ssfe->ssfe_info.snd_sid = sp->stream;
3076 		if (sp->some_taken) {
3077 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3078 		} else {
3079 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3080 		}
3081 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3082 		ssfe->ssfe_info.snd_context = sp->context;
3083 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3084 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3085 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
3086 	} else {
3087 		ssf = mtod(m_notify, struct sctp_send_failed *);
3088 		memset(ssf, 0, length);
3089 		ssf->ssf_type = SCTP_SEND_FAILED;
3090 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3091 		length += sp->length;
3092 		ssf->ssf_length = length;
3093 		ssf->ssf_error = error;
3094 		/* not exactly what the user sent in, but should be close :) */
3095 		ssf->ssf_info.sinfo_stream = sp->stream;
3096 		ssf->ssf_info.sinfo_ssn = 0;
3097 		if (sp->some_taken) {
3098 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3099 		} else {
3100 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3101 		}
3102 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3103 		ssf->ssf_info.sinfo_context = sp->context;
3104 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3105 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3106 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3107 	}
3108 	SCTP_BUF_NEXT(m_notify) = sp->data;
3109 
3110 	/* Steal off the mbuf */
3111 	sp->data = NULL;
3112 	/*
3113 	 * For this case, we check the actual socket buffer, since the assoc
3114 	 * is going away we don't want to overfill the socket buffer for a
3115 	 * non-reader
3116 	 */
3117 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3118 		sctp_m_freem(m_notify);
3119 		return;
3120 	}
3121 	/* append to socket */
3122 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3123 	    0, 0, stcb->asoc.context, 0, 0, 0,
3124 	    m_notify);
3125 	if (control == NULL) {
3126 		/* no memory */
3127 		sctp_m_freem(m_notify);
3128 		return;
3129 	}
3130 	control->spec_flags = M_NOTIFICATION;
3131 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3132 	    control,
3133 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3134 }
3135 
3136 
3137 
3138 static void
3139 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3140 {
3141 	struct mbuf *m_notify;
3142 	struct sctp_adaptation_event *sai;
3143 	struct sctp_queued_to_read *control;
3144 
3145 	if ((stcb == NULL) ||
3146 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3147 		/* event not enabled */
3148 		return;
3149 	}
3150 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3151 	if (m_notify == NULL)
3152 		/* no space left */
3153 		return;
3154 	SCTP_BUF_LEN(m_notify) = 0;
3155 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3156 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3157 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3158 	sai->sai_flags = 0;
3159 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3160 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3161 	sai->sai_assoc_id = sctp_get_associd(stcb);
3162 
3163 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3164 	SCTP_BUF_NEXT(m_notify) = NULL;
3165 
3166 	/* append to socket */
3167 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3168 	    0, 0, stcb->asoc.context, 0, 0, 0,
3169 	    m_notify);
3170 	if (control == NULL) {
3171 		/* no memory */
3172 		sctp_m_freem(m_notify);
3173 		return;
3174 	}
3175 	control->length = SCTP_BUF_LEN(m_notify);
3176 	control->spec_flags = M_NOTIFICATION;
3177 	/* not that we need this */
3178 	control->tail_mbuf = m_notify;
3179 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3180 	    control,
3181 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3182 }
3183 
3184 /* This always must be called with the read-queue LOCKED in the INP */
3185 static void
3186 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3187     uint32_t val, int so_locked
3188 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3189     SCTP_UNUSED
3190 #endif
3191 )
3192 {
3193 	struct mbuf *m_notify;
3194 	struct sctp_pdapi_event *pdapi;
3195 	struct sctp_queued_to_read *control;
3196 	struct sockbuf *sb;
3197 
3198 	if ((stcb == NULL) ||
3199 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3200 		/* event not enabled */
3201 		return;
3202 	}
3203 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3204 		return;
3205 	}
3206 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3207 	if (m_notify == NULL)
3208 		/* no space left */
3209 		return;
3210 	SCTP_BUF_LEN(m_notify) = 0;
3211 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3212 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3213 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3214 	pdapi->pdapi_flags = 0;
3215 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3216 	pdapi->pdapi_indication = error;
3217 	pdapi->pdapi_stream = (val >> 16);
3218 	pdapi->pdapi_seq = (val & 0x0000ffff);
3219 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3220 
3221 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3222 	SCTP_BUF_NEXT(m_notify) = NULL;
3223 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3224 	    0, 0, stcb->asoc.context, 0, 0, 0,
3225 	    m_notify);
3226 	if (control == NULL) {
3227 		/* no memory */
3228 		sctp_m_freem(m_notify);
3229 		return;
3230 	}
3231 	control->spec_flags = M_NOTIFICATION;
3232 	control->length = SCTP_BUF_LEN(m_notify);
3233 	/* not that we need this */
3234 	control->tail_mbuf = m_notify;
3235 	control->held_length = 0;
3236 	control->length = 0;
3237 	sb = &stcb->sctp_socket->so_rcv;
3238 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3239 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3240 	}
3241 	sctp_sballoc(stcb, sb, m_notify);
3242 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3243 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3244 	}
3245 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3246 	control->end_added = 1;
3247 	if (stcb->asoc.control_pdapi)
3248 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3249 	else {
3250 		/* we really should not see this case */
3251 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3252 	}
3253 	if (stcb->sctp_ep && stcb->sctp_socket) {
3254 		/* This should always be the case */
3255 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3256 		struct socket *so;
3257 
3258 		so = SCTP_INP_SO(stcb->sctp_ep);
3259 		if (!so_locked) {
3260 			atomic_add_int(&stcb->asoc.refcnt, 1);
3261 			SCTP_TCB_UNLOCK(stcb);
3262 			SCTP_SOCKET_LOCK(so, 1);
3263 			SCTP_TCB_LOCK(stcb);
3264 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3265 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3266 				SCTP_SOCKET_UNLOCK(so, 1);
3267 				return;
3268 			}
3269 		}
3270 #endif
3271 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3272 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3273 		if (!so_locked) {
3274 			SCTP_SOCKET_UNLOCK(so, 1);
3275 		}
3276 #endif
3277 	}
3278 }
3279 
3280 static void
3281 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3282 {
3283 	struct mbuf *m_notify;
3284 	struct sctp_shutdown_event *sse;
3285 	struct sctp_queued_to_read *control;
3286 
3287 	/*
3288 	 * For TCP model AND UDP connected sockets we will send an error up
3289 	 * when an SHUTDOWN completes
3290 	 */
3291 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3292 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3293 		/* mark socket closed for read/write and wakeup! */
3294 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3295 		struct socket *so;
3296 
3297 		so = SCTP_INP_SO(stcb->sctp_ep);
3298 		atomic_add_int(&stcb->asoc.refcnt, 1);
3299 		SCTP_TCB_UNLOCK(stcb);
3300 		SCTP_SOCKET_LOCK(so, 1);
3301 		SCTP_TCB_LOCK(stcb);
3302 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3303 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3304 			SCTP_SOCKET_UNLOCK(so, 1);
3305 			return;
3306 		}
3307 #endif
3308 		socantsendmore(stcb->sctp_socket);
3309 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3310 		SCTP_SOCKET_UNLOCK(so, 1);
3311 #endif
3312 	}
3313 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3314 		/* event not enabled */
3315 		return;
3316 	}
3317 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3318 	if (m_notify == NULL)
3319 		/* no space left */
3320 		return;
3321 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3322 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3323 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3324 	sse->sse_flags = 0;
3325 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3326 	sse->sse_assoc_id = sctp_get_associd(stcb);
3327 
3328 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3329 	SCTP_BUF_NEXT(m_notify) = NULL;
3330 
3331 	/* append to socket */
3332 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3333 	    0, 0, stcb->asoc.context, 0, 0, 0,
3334 	    m_notify);
3335 	if (control == NULL) {
3336 		/* no memory */
3337 		sctp_m_freem(m_notify);
3338 		return;
3339 	}
3340 	control->spec_flags = M_NOTIFICATION;
3341 	control->length = SCTP_BUF_LEN(m_notify);
3342 	/* not that we need this */
3343 	control->tail_mbuf = m_notify;
3344 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3345 	    control,
3346 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3347 }
3348 
3349 static void
3350 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3351     int so_locked
3352 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3353     SCTP_UNUSED
3354 #endif
3355 )
3356 {
3357 	struct mbuf *m_notify;
3358 	struct sctp_sender_dry_event *event;
3359 	struct sctp_queued_to_read *control;
3360 
3361 	if ((stcb == NULL) ||
3362 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3363 		/* event not enabled */
3364 		return;
3365 	}
3366 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3367 	if (m_notify == NULL) {
3368 		/* no space left */
3369 		return;
3370 	}
3371 	SCTP_BUF_LEN(m_notify) = 0;
3372 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3373 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3374 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3375 	event->sender_dry_flags = 0;
3376 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3377 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3378 
3379 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3380 	SCTP_BUF_NEXT(m_notify) = NULL;
3381 
3382 	/* append to socket */
3383 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3384 	    0, 0, stcb->asoc.context, 0, 0, 0,
3385 	    m_notify);
3386 	if (control == NULL) {
3387 		/* no memory */
3388 		sctp_m_freem(m_notify);
3389 		return;
3390 	}
3391 	control->length = SCTP_BUF_LEN(m_notify);
3392 	control->spec_flags = M_NOTIFICATION;
3393 	/* not that we need this */
3394 	control->tail_mbuf = m_notify;
3395 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3396 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3397 }
3398 
3399 
3400 void
3401 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3402 {
3403 	struct mbuf *m_notify;
3404 	struct sctp_queued_to_read *control;
3405 	struct sctp_stream_change_event *stradd;
3406 
3407 	if ((stcb == NULL) ||
3408 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3409 		/* event not enabled */
3410 		return;
3411 	}
3412 	if ((stcb->asoc.peer_req_out) && flag) {
3413 		/* Peer made the request, don't tell the local user */
3414 		stcb->asoc.peer_req_out = 0;
3415 		return;
3416 	}
3417 	stcb->asoc.peer_req_out = 0;
3418 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3419 	if (m_notify == NULL)
3420 		/* no space left */
3421 		return;
3422 	SCTP_BUF_LEN(m_notify) = 0;
3423 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3424 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3425 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3426 	stradd->strchange_flags = flag;
3427 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3428 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3429 	stradd->strchange_instrms = numberin;
3430 	stradd->strchange_outstrms = numberout;
3431 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3432 	SCTP_BUF_NEXT(m_notify) = NULL;
3433 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3434 		/* no space */
3435 		sctp_m_freem(m_notify);
3436 		return;
3437 	}
3438 	/* append to socket */
3439 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3440 	    0, 0, stcb->asoc.context, 0, 0, 0,
3441 	    m_notify);
3442 	if (control == NULL) {
3443 		/* no memory */
3444 		sctp_m_freem(m_notify);
3445 		return;
3446 	}
3447 	control->spec_flags = M_NOTIFICATION;
3448 	control->length = SCTP_BUF_LEN(m_notify);
3449 	/* not that we need this */
3450 	control->tail_mbuf = m_notify;
3451 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3452 	    control,
3453 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3454 }
3455 
3456 void
3457 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3458 {
3459 	struct mbuf *m_notify;
3460 	struct sctp_queued_to_read *control;
3461 	struct sctp_assoc_reset_event *strasoc;
3462 
3463 	if ((stcb == NULL) ||
3464 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3465 		/* event not enabled */
3466 		return;
3467 	}
3468 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3469 	if (m_notify == NULL)
3470 		/* no space left */
3471 		return;
3472 	SCTP_BUF_LEN(m_notify) = 0;
3473 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3474 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3475 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3476 	strasoc->assocreset_flags = flag;
3477 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3478 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3479 	strasoc->assocreset_local_tsn = sending_tsn;
3480 	strasoc->assocreset_remote_tsn = recv_tsn;
3481 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3482 	SCTP_BUF_NEXT(m_notify) = NULL;
3483 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3484 		/* no space */
3485 		sctp_m_freem(m_notify);
3486 		return;
3487 	}
3488 	/* append to socket */
3489 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3490 	    0, 0, stcb->asoc.context, 0, 0, 0,
3491 	    m_notify);
3492 	if (control == NULL) {
3493 		/* no memory */
3494 		sctp_m_freem(m_notify);
3495 		return;
3496 	}
3497 	control->spec_flags = M_NOTIFICATION;
3498 	control->length = SCTP_BUF_LEN(m_notify);
3499 	/* not that we need this */
3500 	control->tail_mbuf = m_notify;
3501 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3502 	    control,
3503 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3504 }
3505 
3506 
3507 
3508 static void
3509 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3510     int number_entries, uint16_t * list, int flag)
3511 {
3512 	struct mbuf *m_notify;
3513 	struct sctp_queued_to_read *control;
3514 	struct sctp_stream_reset_event *strreset;
3515 	int len;
3516 
3517 	if ((stcb == NULL) ||
3518 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3519 		/* event not enabled */
3520 		return;
3521 	}
3522 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3523 	if (m_notify == NULL)
3524 		/* no space left */
3525 		return;
3526 	SCTP_BUF_LEN(m_notify) = 0;
3527 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3528 	if (len > M_TRAILINGSPACE(m_notify)) {
3529 		/* never enough room */
3530 		sctp_m_freem(m_notify);
3531 		return;
3532 	}
3533 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3534 	memset(strreset, 0, len);
3535 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3536 	strreset->strreset_flags = flag;
3537 	strreset->strreset_length = len;
3538 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3539 	if (number_entries) {
3540 		int i;
3541 
3542 		for (i = 0; i < number_entries; i++) {
3543 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3544 		}
3545 	}
3546 	SCTP_BUF_LEN(m_notify) = len;
3547 	SCTP_BUF_NEXT(m_notify) = NULL;
3548 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3549 		/* no space */
3550 		sctp_m_freem(m_notify);
3551 		return;
3552 	}
3553 	/* append to socket */
3554 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3555 	    0, 0, stcb->asoc.context, 0, 0, 0,
3556 	    m_notify);
3557 	if (control == NULL) {
3558 		/* no memory */
3559 		sctp_m_freem(m_notify);
3560 		return;
3561 	}
3562 	control->spec_flags = M_NOTIFICATION;
3563 	control->length = SCTP_BUF_LEN(m_notify);
3564 	/* not that we need this */
3565 	control->tail_mbuf = m_notify;
3566 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3567 	    control,
3568 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3569 }
3570 
3571 
3572 static void
3573 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3574 {
3575 	struct mbuf *m_notify;
3576 	struct sctp_remote_error *sre;
3577 	struct sctp_queued_to_read *control;
3578 	unsigned int notif_len;
3579 	uint16_t chunk_len;
3580 
3581 	if ((stcb == NULL) ||
3582 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3583 		return;
3584 	}
3585 	if (chunk != NULL) {
3586 		chunk_len = ntohs(chunk->ch.chunk_length);
3587 	} else {
3588 		chunk_len = 0;
3589 	}
3590 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3591 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3592 	if (m_notify == NULL) {
3593 		/* Retry with smaller value. */
3594 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3595 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3596 		if (m_notify == NULL) {
3597 			return;
3598 		}
3599 	}
3600 	SCTP_BUF_NEXT(m_notify) = NULL;
3601 	sre = mtod(m_notify, struct sctp_remote_error *);
3602 	memset(sre, 0, notif_len);
3603 	sre->sre_type = SCTP_REMOTE_ERROR;
3604 	sre->sre_flags = 0;
3605 	sre->sre_length = sizeof(struct sctp_remote_error);
3606 	sre->sre_error = error;
3607 	sre->sre_assoc_id = sctp_get_associd(stcb);
3608 	if (notif_len > sizeof(struct sctp_remote_error)) {
3609 		memcpy(sre->sre_data, chunk, chunk_len);
3610 		sre->sre_length += chunk_len;
3611 	}
3612 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3613 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3614 	    0, 0, stcb->asoc.context, 0, 0, 0,
3615 	    m_notify);
3616 	if (control != NULL) {
3617 		control->length = SCTP_BUF_LEN(m_notify);
3618 		/* not that we need this */
3619 		control->tail_mbuf = m_notify;
3620 		control->spec_flags = M_NOTIFICATION;
3621 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3622 		    control,
3623 		    &stcb->sctp_socket->so_rcv, 1,
3624 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3625 	} else {
3626 		sctp_m_freem(m_notify);
3627 	}
3628 }
3629 
3630 
3631 void
3632 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3633     uint32_t error, void *data, int so_locked
3634 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3635     SCTP_UNUSED
3636 #endif
3637 )
3638 {
3639 	if ((stcb == NULL) ||
3640 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3641 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3642 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3643 		/* If the socket is gone we are out of here */
3644 		return;
3645 	}
3646 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3647 		return;
3648 	}
3649 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3650 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3651 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3652 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3653 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3654 			/* Don't report these in front states */
3655 			return;
3656 		}
3657 	}
3658 	switch (notification) {
3659 	case SCTP_NOTIFY_ASSOC_UP:
3660 		if (stcb->asoc.assoc_up_sent == 0) {
3661 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3662 			stcb->asoc.assoc_up_sent = 1;
3663 		}
3664 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3665 			sctp_notify_adaptation_layer(stcb);
3666 		}
3667 		if (stcb->asoc.auth_supported == 0) {
3668 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3669 			    NULL, so_locked);
3670 		}
3671 		break;
3672 	case SCTP_NOTIFY_ASSOC_DOWN:
3673 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3674 		break;
3675 	case SCTP_NOTIFY_INTERFACE_DOWN:
3676 		{
3677 			struct sctp_nets *net;
3678 
3679 			net = (struct sctp_nets *)data;
3680 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3681 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3682 			break;
3683 		}
3684 	case SCTP_NOTIFY_INTERFACE_UP:
3685 		{
3686 			struct sctp_nets *net;
3687 
3688 			net = (struct sctp_nets *)data;
3689 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3690 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3691 			break;
3692 		}
3693 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3694 		{
3695 			struct sctp_nets *net;
3696 
3697 			net = (struct sctp_nets *)data;
3698 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3699 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3700 			break;
3701 		}
3702 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3703 		sctp_notify_send_failed2(stcb, error,
3704 		    (struct sctp_stream_queue_pending *)data, so_locked);
3705 		break;
3706 	case SCTP_NOTIFY_SENT_DG_FAIL:
3707 		sctp_notify_send_failed(stcb, 1, error,
3708 		    (struct sctp_tmit_chunk *)data, so_locked);
3709 		break;
3710 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3711 		sctp_notify_send_failed(stcb, 0, error,
3712 		    (struct sctp_tmit_chunk *)data, so_locked);
3713 		break;
3714 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3715 		{
3716 			uint32_t val;
3717 
3718 			val = *((uint32_t *) data);
3719 
3720 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3721 			break;
3722 		}
3723 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3724 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3725 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3726 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3727 		} else {
3728 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3729 		}
3730 		break;
3731 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3732 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3733 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3734 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3735 		} else {
3736 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3737 		}
3738 		break;
3739 	case SCTP_NOTIFY_ASSOC_RESTART:
3740 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3741 		if (stcb->asoc.auth_supported == 0) {
3742 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3743 			    NULL, so_locked);
3744 		}
3745 		break;
3746 	case SCTP_NOTIFY_STR_RESET_SEND:
3747 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3748 		break;
3749 	case SCTP_NOTIFY_STR_RESET_RECV:
3750 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3751 		break;
3752 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3753 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3754 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3755 		break;
3756 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3757 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3758 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3759 		break;
3760 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3761 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3762 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3763 		break;
3764 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3765 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3766 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3767 		break;
3768 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3769 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3770 		    error, so_locked);
3771 		break;
3772 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3773 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3774 		    error, so_locked);
3775 		break;
3776 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3777 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3778 		    error, so_locked);
3779 		break;
3780 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3781 		sctp_notify_shutdown_event(stcb);
3782 		break;
3783 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3784 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3785 		    (uint16_t) (uintptr_t) data,
3786 		    so_locked);
3787 		break;
3788 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3789 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3790 		    (uint16_t) (uintptr_t) data,
3791 		    so_locked);
3792 		break;
3793 	case SCTP_NOTIFY_NO_PEER_AUTH:
3794 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3795 		    (uint16_t) (uintptr_t) data,
3796 		    so_locked);
3797 		break;
3798 	case SCTP_NOTIFY_SENDER_DRY:
3799 		sctp_notify_sender_dry_event(stcb, so_locked);
3800 		break;
3801 	case SCTP_NOTIFY_REMOTE_ERROR:
3802 		sctp_notify_remote_error(stcb, error, data);
3803 		break;
3804 	default:
3805 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3806 		    __func__, notification, notification);
3807 		break;
3808 	}			/* end switch */
3809 }
3810 
3811 void
3812 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3813 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3814     SCTP_UNUSED
3815 #endif
3816 )
3817 {
3818 	struct sctp_association *asoc;
3819 	struct sctp_stream_out *outs;
3820 	struct sctp_tmit_chunk *chk, *nchk;
3821 	struct sctp_stream_queue_pending *sp, *nsp;
3822 	int i;
3823 
3824 	if (stcb == NULL) {
3825 		return;
3826 	}
3827 	asoc = &stcb->asoc;
3828 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3829 		/* already being freed */
3830 		return;
3831 	}
3832 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3833 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3834 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3835 		return;
3836 	}
3837 	/* now through all the gunk freeing chunks */
3838 	if (holds_lock == 0) {
3839 		SCTP_TCB_SEND_LOCK(stcb);
3840 	}
3841 	/* sent queue SHOULD be empty */
3842 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3843 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3844 		asoc->sent_queue_cnt--;
3845 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3846 			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3847 				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3848 #ifdef INVARIANTS
3849 			} else {
3850 				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3851 #endif
3852 			}
3853 		}
3854 		if (chk->data != NULL) {
3855 			sctp_free_bufspace(stcb, asoc, chk, 1);
3856 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3857 			    error, chk, so_locked);
3858 			if (chk->data) {
3859 				sctp_m_freem(chk->data);
3860 				chk->data = NULL;
3861 			}
3862 		}
3863 		sctp_free_a_chunk(stcb, chk, so_locked);
3864 		/* sa_ignore FREED_MEMORY */
3865 	}
3866 	/* pending send queue SHOULD be empty */
3867 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3868 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3869 		asoc->send_queue_cnt--;
3870 		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
3871 			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
3872 #ifdef INVARIANTS
3873 		} else {
3874 			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
3875 #endif
3876 		}
3877 		if (chk->data != NULL) {
3878 			sctp_free_bufspace(stcb, asoc, chk, 1);
3879 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3880 			    error, chk, so_locked);
3881 			if (chk->data) {
3882 				sctp_m_freem(chk->data);
3883 				chk->data = NULL;
3884 			}
3885 		}
3886 		sctp_free_a_chunk(stcb, chk, so_locked);
3887 		/* sa_ignore FREED_MEMORY */
3888 	}
3889 	for (i = 0; i < asoc->streamoutcnt; i++) {
3890 		/* For each stream */
3891 		outs = &asoc->strmout[i];
3892 		/* clean up any sends there */
3893 		asoc->locked_on_sending = NULL;
3894 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3895 			asoc->stream_queue_cnt--;
3896 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3897 			sctp_free_spbufspace(stcb, asoc, sp);
3898 			if (sp->data) {
3899 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3900 				    error, (void *)sp, so_locked);
3901 				if (sp->data) {
3902 					sctp_m_freem(sp->data);
3903 					sp->data = NULL;
3904 					sp->tail_mbuf = NULL;
3905 					sp->length = 0;
3906 				}
3907 			}
3908 			if (sp->net) {
3909 				sctp_free_remote_addr(sp->net);
3910 				sp->net = NULL;
3911 			}
3912 			/* Free the chunk */
3913 			sctp_free_a_strmoq(stcb, sp, so_locked);
3914 			/* sa_ignore FREED_MEMORY */
3915 		}
3916 	}
3917 
3918 	if (holds_lock == 0) {
3919 		SCTP_TCB_SEND_UNLOCK(stcb);
3920 	}
3921 }
3922 
3923 void
3924 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3925     struct sctp_abort_chunk *abort, int so_locked
3926 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3927     SCTP_UNUSED
3928 #endif
3929 )
3930 {
3931 	if (stcb == NULL) {
3932 		return;
3933 	}
3934 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3935 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3936 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3937 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3938 	}
3939 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3940 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3941 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3942 		return;
3943 	}
3944 	/* Tell them we lost the asoc */
3945 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3946 	if (from_peer) {
3947 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3948 	} else {
3949 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3950 	}
3951 }
3952 
3953 void
3954 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3955     struct mbuf *m, int iphlen,
3956     struct sockaddr *src, struct sockaddr *dst,
3957     struct sctphdr *sh, struct mbuf *op_err,
3958     uint8_t mflowtype, uint32_t mflowid,
3959     uint32_t vrf_id, uint16_t port)
3960 {
3961 	uint32_t vtag;
3962 
3963 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3964 	struct socket *so;
3965 
3966 #endif
3967 
3968 	vtag = 0;
3969 	if (stcb != NULL) {
3970 		/* We have a TCB to abort, send notification too */
3971 		vtag = stcb->asoc.peer_vtag;
3972 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
3973 		/* get the assoc vrf id and table id */
3974 		vrf_id = stcb->asoc.vrf_id;
3975 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3976 	}
3977 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
3978 	    mflowtype, mflowid, inp->fibnum,
3979 	    vrf_id, port);
3980 	if (stcb != NULL) {
3981 		/* Ok, now lets free it */
3982 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3983 		so = SCTP_INP_SO(inp);
3984 		atomic_add_int(&stcb->asoc.refcnt, 1);
3985 		SCTP_TCB_UNLOCK(stcb);
3986 		SCTP_SOCKET_LOCK(so, 1);
3987 		SCTP_TCB_LOCK(stcb);
3988 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3989 #endif
3990 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3991 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3992 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3993 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3994 		}
3995 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
3996 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
3997 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3998 		SCTP_SOCKET_UNLOCK(so, 1);
3999 #endif
4000 	}
4001 }
4002 
4003 #ifdef SCTP_ASOCLOG_OF_TSNS
4004 void
4005 sctp_print_out_track_log(struct sctp_tcb *stcb)
4006 {
4007 #ifdef NOSIY_PRINTS
4008 	int i;
4009 
4010 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4011 	SCTP_PRINTF("IN bound TSN log-aaa\n");
4012 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4013 		SCTP_PRINTF("None rcvd\n");
4014 		goto none_in;
4015 	}
4016 	if (stcb->asoc.tsn_in_wrapped) {
4017 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4018 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4019 			    stcb->asoc.in_tsnlog[i].tsn,
4020 			    stcb->asoc.in_tsnlog[i].strm,
4021 			    stcb->asoc.in_tsnlog[i].seq,
4022 			    stcb->asoc.in_tsnlog[i].flgs,
4023 			    stcb->asoc.in_tsnlog[i].sz);
4024 		}
4025 	}
4026 	if (stcb->asoc.tsn_in_at) {
4027 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4028 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4029 			    stcb->asoc.in_tsnlog[i].tsn,
4030 			    stcb->asoc.in_tsnlog[i].strm,
4031 			    stcb->asoc.in_tsnlog[i].seq,
4032 			    stcb->asoc.in_tsnlog[i].flgs,
4033 			    stcb->asoc.in_tsnlog[i].sz);
4034 		}
4035 	}
4036 none_in:
4037 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4038 	if ((stcb->asoc.tsn_out_at == 0) &&
4039 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4040 		SCTP_PRINTF("None sent\n");
4041 	}
4042 	if (stcb->asoc.tsn_out_wrapped) {
4043 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4044 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4045 			    stcb->asoc.out_tsnlog[i].tsn,
4046 			    stcb->asoc.out_tsnlog[i].strm,
4047 			    stcb->asoc.out_tsnlog[i].seq,
4048 			    stcb->asoc.out_tsnlog[i].flgs,
4049 			    stcb->asoc.out_tsnlog[i].sz);
4050 		}
4051 	}
4052 	if (stcb->asoc.tsn_out_at) {
4053 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4054 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4055 			    stcb->asoc.out_tsnlog[i].tsn,
4056 			    stcb->asoc.out_tsnlog[i].strm,
4057 			    stcb->asoc.out_tsnlog[i].seq,
4058 			    stcb->asoc.out_tsnlog[i].flgs,
4059 			    stcb->asoc.out_tsnlog[i].sz);
4060 		}
4061 	}
4062 #endif
4063 }
4064 
4065 #endif
4066 
4067 void
4068 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4069     struct mbuf *op_err,
4070     int so_locked
4071 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4072     SCTP_UNUSED
4073 #endif
4074 )
4075 {
4076 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4077 	struct socket *so;
4078 
4079 #endif
4080 
4081 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4082 	so = SCTP_INP_SO(inp);
4083 #endif
4084 	if (stcb == NULL) {
4085 		/* Got to have a TCB */
4086 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4087 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4088 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4089 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4090 			}
4091 		}
4092 		return;
4093 	} else {
4094 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4095 	}
4096 	/* notify the ulp */
4097 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4098 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4099 	}
4100 	/* notify the peer */
4101 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4102 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4103 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4104 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4105 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4106 	}
4107 	/* now free the asoc */
4108 #ifdef SCTP_ASOCLOG_OF_TSNS
4109 	sctp_print_out_track_log(stcb);
4110 #endif
4111 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4112 	if (!so_locked) {
4113 		atomic_add_int(&stcb->asoc.refcnt, 1);
4114 		SCTP_TCB_UNLOCK(stcb);
4115 		SCTP_SOCKET_LOCK(so, 1);
4116 		SCTP_TCB_LOCK(stcb);
4117 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4118 	}
4119 #endif
4120 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4121 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4122 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4123 	if (!so_locked) {
4124 		SCTP_SOCKET_UNLOCK(so, 1);
4125 	}
4126 #endif
4127 }
4128 
4129 void
4130 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4131     struct sockaddr *src, struct sockaddr *dst,
4132     struct sctphdr *sh, struct sctp_inpcb *inp,
4133     struct mbuf *cause,
4134     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4135     uint32_t vrf_id, uint16_t port)
4136 {
4137 	struct sctp_chunkhdr *ch, chunk_buf;
4138 	unsigned int chk_length;
4139 	int contains_init_chunk;
4140 
4141 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4142 	/* Generate a TO address for future reference */
4143 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4144 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4145 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4146 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4147 		}
4148 	}
4149 	contains_init_chunk = 0;
4150 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4151 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4152 	while (ch != NULL) {
4153 		chk_length = ntohs(ch->chunk_length);
4154 		if (chk_length < sizeof(*ch)) {
4155 			/* break to abort land */
4156 			break;
4157 		}
4158 		switch (ch->chunk_type) {
4159 		case SCTP_INIT:
4160 			contains_init_chunk = 1;
4161 			break;
4162 		case SCTP_PACKET_DROPPED:
4163 			/* we don't respond to pkt-dropped */
4164 			return;
4165 		case SCTP_ABORT_ASSOCIATION:
4166 			/* we don't respond with an ABORT to an ABORT */
4167 			return;
4168 		case SCTP_SHUTDOWN_COMPLETE:
4169 			/*
4170 			 * we ignore it since we are not waiting for it and
4171 			 * peer is gone
4172 			 */
4173 			return;
4174 		case SCTP_SHUTDOWN_ACK:
4175 			sctp_send_shutdown_complete2(src, dst, sh,
4176 			    mflowtype, mflowid, fibnum,
4177 			    vrf_id, port);
4178 			return;
4179 		default:
4180 			break;
4181 		}
4182 		offset += SCTP_SIZE32(chk_length);
4183 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4184 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4185 	}
4186 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4187 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4188 	    (contains_init_chunk == 0))) {
4189 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4190 		    mflowtype, mflowid, fibnum,
4191 		    vrf_id, port);
4192 	}
4193 }
4194 
4195 /*
4196  * check the inbound datagram to make sure there is not an abort inside it,
4197  * if there is return 1, else return 0.
4198  */
4199 int
4200 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4201 {
4202 	struct sctp_chunkhdr *ch;
4203 	struct sctp_init_chunk *init_chk, chunk_buf;
4204 	int offset;
4205 	unsigned int chk_length;
4206 
4207 	offset = iphlen + sizeof(struct sctphdr);
4208 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4209 	    (uint8_t *) & chunk_buf);
4210 	while (ch != NULL) {
4211 		chk_length = ntohs(ch->chunk_length);
4212 		if (chk_length < sizeof(*ch)) {
4213 			/* packet is probably corrupt */
4214 			break;
4215 		}
4216 		/* we seem to be ok, is it an abort? */
4217 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4218 			/* yep, tell them */
4219 			return (1);
4220 		}
4221 		if (ch->chunk_type == SCTP_INITIATION) {
4222 			/* need to update the Vtag */
4223 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4224 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4225 			if (init_chk != NULL) {
4226 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4227 			}
4228 		}
4229 		/* Nope, move to the next chunk */
4230 		offset += SCTP_SIZE32(chk_length);
4231 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4232 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4233 	}
4234 	return (0);
4235 }
4236 
4237 /*
4238  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4239  * set (i.e. it's 0) so, create this function to compare link local scopes
4240  */
4241 #ifdef INET6
4242 uint32_t
4243 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4244 {
4245 	struct sockaddr_in6 a, b;
4246 
4247 	/* save copies */
4248 	a = *addr1;
4249 	b = *addr2;
4250 
4251 	if (a.sin6_scope_id == 0)
4252 		if (sa6_recoverscope(&a)) {
4253 			/* can't get scope, so can't match */
4254 			return (0);
4255 		}
4256 	if (b.sin6_scope_id == 0)
4257 		if (sa6_recoverscope(&b)) {
4258 			/* can't get scope, so can't match */
4259 			return (0);
4260 		}
4261 	if (a.sin6_scope_id != b.sin6_scope_id)
4262 		return (0);
4263 
4264 	return (1);
4265 }
4266 
4267 /*
4268  * returns a sockaddr_in6 with embedded scope recovered and removed
4269  */
4270 struct sockaddr_in6 *
4271 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4272 {
4273 	/* check and strip embedded scope junk */
4274 	if (addr->sin6_family == AF_INET6) {
4275 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4276 			if (addr->sin6_scope_id == 0) {
4277 				*store = *addr;
4278 				if (!sa6_recoverscope(store)) {
4279 					/* use the recovered scope */
4280 					addr = store;
4281 				}
4282 			} else {
4283 				/* else, return the original "to" addr */
4284 				in6_clearscope(&addr->sin6_addr);
4285 			}
4286 		}
4287 	}
4288 	return (addr);
4289 }
4290 
4291 #endif
4292 
4293 /*
4294  * are the two addresses the same?  currently a "scopeless" check returns: 1
4295  * if same, 0 if not
4296  */
4297 int
4298 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4299 {
4300 
4301 	/* must be valid */
4302 	if (sa1 == NULL || sa2 == NULL)
4303 		return (0);
4304 
4305 	/* must be the same family */
4306 	if (sa1->sa_family != sa2->sa_family)
4307 		return (0);
4308 
4309 	switch (sa1->sa_family) {
4310 #ifdef INET6
4311 	case AF_INET6:
4312 		{
4313 			/* IPv6 addresses */
4314 			struct sockaddr_in6 *sin6_1, *sin6_2;
4315 
4316 			sin6_1 = (struct sockaddr_in6 *)sa1;
4317 			sin6_2 = (struct sockaddr_in6 *)sa2;
4318 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4319 			    sin6_2));
4320 		}
4321 #endif
4322 #ifdef INET
4323 	case AF_INET:
4324 		{
4325 			/* IPv4 addresses */
4326 			struct sockaddr_in *sin_1, *sin_2;
4327 
4328 			sin_1 = (struct sockaddr_in *)sa1;
4329 			sin_2 = (struct sockaddr_in *)sa2;
4330 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4331 		}
4332 #endif
4333 	default:
4334 		/* we don't do these... */
4335 		return (0);
4336 	}
4337 }
4338 
4339 void
4340 sctp_print_address(struct sockaddr *sa)
4341 {
4342 #ifdef INET6
4343 	char ip6buf[INET6_ADDRSTRLEN];
4344 
4345 #endif
4346 
4347 	switch (sa->sa_family) {
4348 #ifdef INET6
4349 	case AF_INET6:
4350 		{
4351 			struct sockaddr_in6 *sin6;
4352 
4353 			sin6 = (struct sockaddr_in6 *)sa;
4354 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4355 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4356 			    ntohs(sin6->sin6_port),
4357 			    sin6->sin6_scope_id);
4358 			break;
4359 		}
4360 #endif
4361 #ifdef INET
4362 	case AF_INET:
4363 		{
4364 			struct sockaddr_in *sin;
4365 			unsigned char *p;
4366 
4367 			sin = (struct sockaddr_in *)sa;
4368 			p = (unsigned char *)&sin->sin_addr;
4369 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4370 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4371 			break;
4372 		}
4373 #endif
4374 	default:
4375 		SCTP_PRINTF("?\n");
4376 		break;
4377 	}
4378 }
4379 
4380 void
4381 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4382     struct sctp_inpcb *new_inp,
4383     struct sctp_tcb *stcb,
4384     int waitflags)
4385 {
4386 	/*
4387 	 * go through our old INP and pull off any control structures that
4388 	 * belong to stcb and move then to the new inp.
4389 	 */
4390 	struct socket *old_so, *new_so;
4391 	struct sctp_queued_to_read *control, *nctl;
4392 	struct sctp_readhead tmp_queue;
4393 	struct mbuf *m;
4394 	int error = 0;
4395 
4396 	old_so = old_inp->sctp_socket;
4397 	new_so = new_inp->sctp_socket;
4398 	TAILQ_INIT(&tmp_queue);
4399 	error = sblock(&old_so->so_rcv, waitflags);
4400 	if (error) {
4401 		/*
4402 		 * Gak, can't get sblock, we have a problem. data will be
4403 		 * left stranded.. and we don't dare look at it since the
4404 		 * other thread may be reading something. Oh well, its a
4405 		 * screwed up app that does a peeloff OR a accept while
4406 		 * reading from the main socket... actually its only the
4407 		 * peeloff() case, since I think read will fail on a
4408 		 * listening socket..
4409 		 */
4410 		return;
4411 	}
4412 	/* lock the socket buffers */
4413 	SCTP_INP_READ_LOCK(old_inp);
4414 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4415 		/* Pull off all for out target stcb */
4416 		if (control->stcb == stcb) {
4417 			/* remove it we want it */
4418 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4419 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4420 			m = control->data;
4421 			while (m) {
4422 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4423 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4424 				}
4425 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4426 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4427 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4428 				}
4429 				m = SCTP_BUF_NEXT(m);
4430 			}
4431 		}
4432 	}
4433 	SCTP_INP_READ_UNLOCK(old_inp);
4434 	/* Remove the sb-lock on the old socket */
4435 
4436 	sbunlock(&old_so->so_rcv);
4437 	/* Now we move them over to the new socket buffer */
4438 	SCTP_INP_READ_LOCK(new_inp);
4439 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4440 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4441 		m = control->data;
4442 		while (m) {
4443 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4444 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4445 			}
4446 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4447 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4448 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4449 			}
4450 			m = SCTP_BUF_NEXT(m);
4451 		}
4452 	}
4453 	SCTP_INP_READ_UNLOCK(new_inp);
4454 }
4455 
4456 void
4457 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp)
4458 {
4459 	if (inp && inp->sctp_socket) {
4460 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4461 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4462 		} else {
4463 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4464 			struct socket *so;
4465 
4466 			so = SCTP_INP_SO(inp);
4467 			if (!so_locked) {
4468 				if (stcb) {
4469 					atomic_add_int(&stcb->asoc.refcnt, 1);
4470 					SCTP_TCB_UNLOCK(stcb);
4471 				}
4472 				SCTP_SOCKET_LOCK(so, 1);
4473 				if (stcb) {
4474 					SCTP_TCB_LOCK(stcb);
4475 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
4476 				}
4477 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4478 					SCTP_SOCKET_UNLOCK(so, 1);
4479 					return;
4480 				}
4481 			}
4482 #endif
4483 			sctp_sorwakeup(inp, inp->sctp_socket);
4484 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4485 			if (!so_locked) {
4486 				SCTP_SOCKET_UNLOCK(so, 1);
4487 			}
4488 #endif
4489 		}
4490 	}
4491 }
4492 
4493 void
4494 sctp_add_to_readq(struct sctp_inpcb *inp,
4495     struct sctp_tcb *stcb,
4496     struct sctp_queued_to_read *control,
4497     struct sockbuf *sb,
4498     int end,
4499     int inp_read_lock_held,
4500     int so_locked
4501 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4502     SCTP_UNUSED
4503 #endif
4504 )
4505 {
4506 	/*
4507 	 * Here we must place the control on the end of the socket read
4508 	 * queue AND increment sb_cc so that select will work properly on
4509 	 * read.
4510 	 */
4511 	struct mbuf *m, *prev = NULL;
4512 
4513 	if (inp == NULL) {
4514 		/* Gak, TSNH!! */
4515 #ifdef INVARIANTS
4516 		panic("Gak, inp NULL on add_to_readq");
4517 #endif
4518 		return;
4519 	}
4520 	if (inp_read_lock_held == 0)
4521 		SCTP_INP_READ_LOCK(inp);
4522 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4523 		sctp_free_remote_addr(control->whoFrom);
4524 		if (control->data) {
4525 			sctp_m_freem(control->data);
4526 			control->data = NULL;
4527 		}
4528 		sctp_free_a_readq(stcb, control);
4529 		if (inp_read_lock_held == 0)
4530 			SCTP_INP_READ_UNLOCK(inp);
4531 		return;
4532 	}
4533 	if (!(control->spec_flags & M_NOTIFICATION)) {
4534 		atomic_add_int(&inp->total_recvs, 1);
4535 		if (!control->do_not_ref_stcb) {
4536 			atomic_add_int(&stcb->total_recvs, 1);
4537 		}
4538 	}
4539 	m = control->data;
4540 	control->held_length = 0;
4541 	control->length = 0;
4542 	while (m) {
4543 		if (SCTP_BUF_LEN(m) == 0) {
4544 			/* Skip mbufs with NO length */
4545 			if (prev == NULL) {
4546 				/* First one */
4547 				control->data = sctp_m_free(m);
4548 				m = control->data;
4549 			} else {
4550 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4551 				m = SCTP_BUF_NEXT(prev);
4552 			}
4553 			if (m == NULL) {
4554 				control->tail_mbuf = prev;
4555 			}
4556 			continue;
4557 		}
4558 		prev = m;
4559 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4560 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4561 		}
4562 		sctp_sballoc(stcb, sb, m);
4563 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4564 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4565 		}
4566 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4567 		m = SCTP_BUF_NEXT(m);
4568 	}
4569 	if (prev != NULL) {
4570 		control->tail_mbuf = prev;
4571 	} else {
4572 		/* Everything got collapsed out?? */
4573 		sctp_free_remote_addr(control->whoFrom);
4574 		sctp_free_a_readq(stcb, control);
4575 		if (inp_read_lock_held == 0)
4576 			SCTP_INP_READ_UNLOCK(inp);
4577 		return;
4578 	}
4579 	if (end) {
4580 		control->end_added = 1;
4581 	}
4582 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4583 	control->on_read_q = 1;
4584 	if (inp_read_lock_held == 0)
4585 		SCTP_INP_READ_UNLOCK(inp);
4586 	if (inp && inp->sctp_socket) {
4587 		sctp_wakeup_the_read_socket(inp);
4588 	}
4589 }
4590 
4591 
4592 int
4593 sctp_append_to_readq(struct sctp_inpcb *inp,
4594     struct sctp_tcb *stcb,
4595     struct sctp_queued_to_read *control,
4596     struct mbuf *m,
4597     int end,
4598     int ctls_cumack,
4599     struct sockbuf *sb)
4600 {
4601 	/*
4602 	 * A partial delivery API event is underway. OR we are appending on
4603 	 * the reassembly queue.
4604 	 *
4605 	 * If PDAPI this means we need to add m to the end of the data.
4606 	 * Increase the length in the control AND increment the sb_cc.
4607 	 * Otherwise sb is NULL and all we need to do is put it at the end
4608 	 * of the mbuf chain.
4609 	 */
4610 	int len = 0;
4611 	struct mbuf *mm, *tail = NULL, *prev = NULL;
4612 
4613 	if (inp) {
4614 		SCTP_INP_READ_LOCK(inp);
4615 	}
4616 	if (control == NULL) {
4617 get_out:
4618 		if (inp) {
4619 			SCTP_INP_READ_UNLOCK(inp);
4620 		}
4621 		return (-1);
4622 	}
4623 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
4624 		SCTP_INP_READ_UNLOCK(inp);
4625 		return (0);
4626 	}
4627 	if (control->end_added) {
4628 		/* huh this one is complete? */
4629 		goto get_out;
4630 	}
4631 	mm = m;
4632 	if (mm == NULL) {
4633 		goto get_out;
4634 	}
4635 	while (mm) {
4636 		if (SCTP_BUF_LEN(mm) == 0) {
4637 			/* Skip mbufs with NO lenght */
4638 			if (prev == NULL) {
4639 				/* First one */
4640 				m = sctp_m_free(mm);
4641 				mm = m;
4642 			} else {
4643 				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
4644 				mm = SCTP_BUF_NEXT(prev);
4645 			}
4646 			continue;
4647 		}
4648 		prev = mm;
4649 		len += SCTP_BUF_LEN(mm);
4650 		if (sb) {
4651 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4652 				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
4653 			}
4654 			sctp_sballoc(stcb, sb, mm);
4655 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4656 				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4657 			}
4658 		}
4659 		mm = SCTP_BUF_NEXT(mm);
4660 	}
4661 	if (prev) {
4662 		tail = prev;
4663 	} else {
4664 		/* Really there should always be a prev */
4665 		if (m == NULL) {
4666 			/* Huh nothing left? */
4667 #ifdef INVARIANTS
4668 			panic("Nothing left to add?");
4669 #else
4670 			goto get_out;
4671 #endif
4672 		}
4673 		tail = m;
4674 	}
4675 	if (control->tail_mbuf) {
4676 		/* append */
4677 		SCTP_BUF_NEXT(control->tail_mbuf) = m;
4678 		control->tail_mbuf = tail;
4679 	} else {
4680 		/* nothing there */
4681 #ifdef INVARIANTS
4682 		if (control->data != NULL) {
4683 			panic("This should NOT happen");
4684 		}
4685 #endif
4686 		control->data = m;
4687 		control->tail_mbuf = tail;
4688 	}
4689 	atomic_add_int(&control->length, len);
4690 	if (end) {
4691 		/* message is complete */
4692 		if (stcb && (control == stcb->asoc.control_pdapi)) {
4693 			stcb->asoc.control_pdapi = NULL;
4694 		}
4695 		control->held_length = 0;
4696 		control->end_added = 1;
4697 	}
4698 	if (stcb == NULL) {
4699 		control->do_not_ref_stcb = 1;
4700 	}
4701 	/*
4702 	 * When we are appending in partial delivery, the cum-ack is used
4703 	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
4704 	 * is populated in the outbound sinfo structure from the true cumack
4705 	 * if the association exists...
4706 	 */
4707 	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
4708 	if (inp) {
4709 		SCTP_INP_READ_UNLOCK(inp);
4710 	}
4711 	if (inp && inp->sctp_socket) {
4712 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4713 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4714 		} else {
4715 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4716 			struct socket *so;
4717 
4718 			so = SCTP_INP_SO(inp);
4719 			if (stcb) {
4720 				atomic_add_int(&stcb->asoc.refcnt, 1);
4721 				SCTP_TCB_UNLOCK(stcb);
4722 			}
4723 			SCTP_SOCKET_LOCK(so, 1);
4724 			if (stcb) {
4725 				SCTP_TCB_LOCK(stcb);
4726 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4727 			}
4728 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4729 				SCTP_SOCKET_UNLOCK(so, 1);
4730 				return (0);
4731 			}
4732 #endif
4733 			sctp_sorwakeup(inp, inp->sctp_socket);
4734 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4735 			SCTP_SOCKET_UNLOCK(so, 1);
4736 #endif
4737 		}
4738 	}
4739 	return (0);
4740 }
4741 
4742 
4743 
4744 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4745  *************ALTERNATE ROUTING CODE
4746  */
4747 
4748 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4749  *************ALTERNATE ROUTING CODE
4750  */
4751 
4752 struct mbuf *
4753 sctp_generate_cause(uint16_t code, char *info)
4754 {
4755 	struct mbuf *m;
4756 	struct sctp_gen_error_cause *cause;
4757 	size_t info_len;
4758 	uint16_t len;
4759 
4760 	if ((code == 0) || (info == NULL)) {
4761 		return (NULL);
4762 	}
4763 	info_len = strlen(info);
4764 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4765 		return (NULL);
4766 	}
4767 	len = (uint16_t) (sizeof(struct sctp_paramhdr) + info_len);
4768 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4769 	if (m != NULL) {
4770 		SCTP_BUF_LEN(m) = len;
4771 		cause = mtod(m, struct sctp_gen_error_cause *);
4772 		cause->code = htons(code);
4773 		cause->length = htons(len);
4774 		memcpy(cause->info, info, info_len);
4775 	}
4776 	return (m);
4777 }
4778 
4779 struct mbuf *
4780 sctp_generate_no_user_data_cause(uint32_t tsn)
4781 {
4782 	struct mbuf *m;
4783 	struct sctp_error_no_user_data *no_user_data_cause;
4784 	uint16_t len;
4785 
4786 	len = (uint16_t) sizeof(struct sctp_error_no_user_data);
4787 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4788 	if (m != NULL) {
4789 		SCTP_BUF_LEN(m) = len;
4790 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4791 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4792 		no_user_data_cause->cause.length = htons(len);
4793 		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4794 	}
4795 	return (m);
4796 }
4797 
4798 #ifdef SCTP_MBCNT_LOGGING
4799 void
4800 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4801     struct sctp_tmit_chunk *tp1, int chk_cnt)
4802 {
4803 	if (tp1->data == NULL) {
4804 		return;
4805 	}
4806 	asoc->chunks_on_out_queue -= chk_cnt;
4807 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4808 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4809 		    asoc->total_output_queue_size,
4810 		    tp1->book_size,
4811 		    0,
4812 		    tp1->mbcnt);
4813 	}
4814 	if (asoc->total_output_queue_size >= tp1->book_size) {
4815 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4816 	} else {
4817 		asoc->total_output_queue_size = 0;
4818 	}
4819 
4820 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4821 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4822 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4823 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4824 		} else {
4825 			stcb->sctp_socket->so_snd.sb_cc = 0;
4826 
4827 		}
4828 	}
4829 }
4830 
4831 #endif
4832 
4833 int
4834 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4835     uint8_t sent, int so_locked
4836 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4837     SCTP_UNUSED
4838 #endif
4839 )
4840 {
4841 	struct sctp_stream_out *strq;
4842 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4843 	struct sctp_stream_queue_pending *sp;
4844 	uint16_t stream = 0, seq = 0;
4845 	uint8_t foundeom = 0;
4846 	int ret_sz = 0;
4847 	int notdone;
4848 	int do_wakeup_routine = 0;
4849 
4850 	stream = tp1->rec.data.stream_number;
4851 	seq = tp1->rec.data.stream_seq;
4852 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4853 		stcb->asoc.abandoned_sent[0]++;
4854 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4855 		stcb->asoc.strmout[stream].abandoned_sent[0]++;
4856 #if defined(SCTP_DETAILED_STR_STATS)
4857 		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4858 #endif
4859 	} else {
4860 		stcb->asoc.abandoned_unsent[0]++;
4861 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4862 		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
4863 #if defined(SCTP_DETAILED_STR_STATS)
4864 		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4865 #endif
4866 	}
4867 	do {
4868 		ret_sz += tp1->book_size;
4869 		if (tp1->data != NULL) {
4870 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4871 				sctp_flight_size_decrease(tp1);
4872 				sctp_total_flight_decrease(stcb, tp1);
4873 			}
4874 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4875 			stcb->asoc.peers_rwnd += tp1->send_size;
4876 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4877 			if (sent) {
4878 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4879 			} else {
4880 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4881 			}
4882 			if (tp1->data) {
4883 				sctp_m_freem(tp1->data);
4884 				tp1->data = NULL;
4885 			}
4886 			do_wakeup_routine = 1;
4887 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4888 				stcb->asoc.sent_queue_cnt_removeable--;
4889 			}
4890 		}
4891 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4892 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4893 		    SCTP_DATA_NOT_FRAG) {
4894 			/* not frag'ed we ae done   */
4895 			notdone = 0;
4896 			foundeom = 1;
4897 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4898 			/* end of frag, we are done */
4899 			notdone = 0;
4900 			foundeom = 1;
4901 		} else {
4902 			/*
4903 			 * Its a begin or middle piece, we must mark all of
4904 			 * it
4905 			 */
4906 			notdone = 1;
4907 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4908 		}
4909 	} while (tp1 && notdone);
4910 	if (foundeom == 0) {
4911 		/*
4912 		 * The multi-part message was scattered across the send and
4913 		 * sent queue.
4914 		 */
4915 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4916 			if ((tp1->rec.data.stream_number != stream) ||
4917 			    (tp1->rec.data.stream_seq != seq)) {
4918 				break;
4919 			}
4920 			/*
4921 			 * save to chk in case we have some on stream out
4922 			 * queue. If so and we have an un-transmitted one we
4923 			 * don't have to fudge the TSN.
4924 			 */
4925 			chk = tp1;
4926 			ret_sz += tp1->book_size;
4927 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4928 			if (sent) {
4929 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4930 			} else {
4931 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4932 			}
4933 			if (tp1->data) {
4934 				sctp_m_freem(tp1->data);
4935 				tp1->data = NULL;
4936 			}
4937 			/* No flight involved here book the size to 0 */
4938 			tp1->book_size = 0;
4939 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4940 				foundeom = 1;
4941 			}
4942 			do_wakeup_routine = 1;
4943 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4944 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4945 			/*
4946 			 * on to the sent queue so we can wait for it to be
4947 			 * passed by.
4948 			 */
4949 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4950 			    sctp_next);
4951 			stcb->asoc.send_queue_cnt--;
4952 			stcb->asoc.sent_queue_cnt++;
4953 		}
4954 	}
4955 	if (foundeom == 0) {
4956 		/*
4957 		 * Still no eom found. That means there is stuff left on the
4958 		 * stream out queue.. yuck.
4959 		 */
4960 		SCTP_TCB_SEND_LOCK(stcb);
4961 		strq = &stcb->asoc.strmout[stream];
4962 		sp = TAILQ_FIRST(&strq->outqueue);
4963 		if (sp != NULL) {
4964 			sp->discard_rest = 1;
4965 			/*
4966 			 * We may need to put a chunk on the queue that
4967 			 * holds the TSN that would have been sent with the
4968 			 * LAST bit.
4969 			 */
4970 			if (chk == NULL) {
4971 				/* Yep, we have to */
4972 				sctp_alloc_a_chunk(stcb, chk);
4973 				if (chk == NULL) {
4974 					/*
4975 					 * we are hosed. All we can do is
4976 					 * nothing.. which will cause an
4977 					 * abort if the peer is paying
4978 					 * attention.
4979 					 */
4980 					goto oh_well;
4981 				}
4982 				memset(chk, 0, sizeof(*chk));
4983 				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4984 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4985 				chk->asoc = &stcb->asoc;
4986 				chk->rec.data.stream_seq = strq->next_sequence_send;
4987 				chk->rec.data.stream_number = sp->stream;
4988 				chk->rec.data.payloadtype = sp->ppid;
4989 				chk->rec.data.context = sp->context;
4990 				chk->flags = sp->act_flags;
4991 				chk->whoTo = NULL;
4992 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4993 				strq->chunks_on_queues++;
4994 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4995 				stcb->asoc.sent_queue_cnt++;
4996 				stcb->asoc.pr_sctp_cnt++;
4997 			} else {
4998 				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4999 			}
5000 			strq->next_sequence_send++;
5001 	oh_well:
5002 			if (sp->data) {
5003 				/*
5004 				 * Pull any data to free up the SB and allow
5005 				 * sender to "add more" while we will throw
5006 				 * away :-)
5007 				 */
5008 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
5009 				ret_sz += sp->length;
5010 				do_wakeup_routine = 1;
5011 				sp->some_taken = 1;
5012 				sctp_m_freem(sp->data);
5013 				sp->data = NULL;
5014 				sp->tail_mbuf = NULL;
5015 				sp->length = 0;
5016 			}
5017 		}
5018 		SCTP_TCB_SEND_UNLOCK(stcb);
5019 	}
5020 	if (do_wakeup_routine) {
5021 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5022 		struct socket *so;
5023 
5024 		so = SCTP_INP_SO(stcb->sctp_ep);
5025 		if (!so_locked) {
5026 			atomic_add_int(&stcb->asoc.refcnt, 1);
5027 			SCTP_TCB_UNLOCK(stcb);
5028 			SCTP_SOCKET_LOCK(so, 1);
5029 			SCTP_TCB_LOCK(stcb);
5030 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
5031 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5032 				/* assoc was freed while we were unlocked */
5033 				SCTP_SOCKET_UNLOCK(so, 1);
5034 				return (ret_sz);
5035 			}
5036 		}
5037 #endif
5038 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
5039 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5040 		if (!so_locked) {
5041 			SCTP_SOCKET_UNLOCK(so, 1);
5042 		}
5043 #endif
5044 	}
5045 	return (ret_sz);
5046 }
5047 
5048 /*
5049  * checks to see if the given address, sa, is one that is currently known by
5050  * the kernel note: can't distinguish the same address on multiple interfaces
5051  * and doesn't handle multiple addresses with different zone/scope id's note:
5052  * ifa_ifwithaddr() compares the entire sockaddr struct
5053  */
5054 struct sctp_ifa *
5055 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5056     int holds_lock)
5057 {
5058 	struct sctp_laddr *laddr;
5059 
5060 	if (holds_lock == 0) {
5061 		SCTP_INP_RLOCK(inp);
5062 	}
5063 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5064 		if (laddr->ifa == NULL)
5065 			continue;
5066 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5067 			continue;
5068 #ifdef INET
5069 		if (addr->sa_family == AF_INET) {
5070 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5071 			    laddr->ifa->address.sin.sin_addr.s_addr) {
5072 				/* found him. */
5073 				if (holds_lock == 0) {
5074 					SCTP_INP_RUNLOCK(inp);
5075 				}
5076 				return (laddr->ifa);
5077 				break;
5078 			}
5079 		}
5080 #endif
5081 #ifdef INET6
5082 		if (addr->sa_family == AF_INET6) {
5083 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5084 			    &laddr->ifa->address.sin6)) {
5085 				/* found him. */
5086 				if (holds_lock == 0) {
5087 					SCTP_INP_RUNLOCK(inp);
5088 				}
5089 				return (laddr->ifa);
5090 				break;
5091 			}
5092 		}
5093 #endif
5094 	}
5095 	if (holds_lock == 0) {
5096 		SCTP_INP_RUNLOCK(inp);
5097 	}
5098 	return (NULL);
5099 }
5100 
5101 uint32_t
5102 sctp_get_ifa_hash_val(struct sockaddr *addr)
5103 {
5104 	switch (addr->sa_family) {
5105 #ifdef INET
5106 	case AF_INET:
5107 		{
5108 			struct sockaddr_in *sin;
5109 
5110 			sin = (struct sockaddr_in *)addr;
5111 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5112 		}
5113 #endif
5114 #ifdef INET6
5115 	case AF_INET6:
5116 		{
5117 			struct sockaddr_in6 *sin6;
5118 			uint32_t hash_of_addr;
5119 
5120 			sin6 = (struct sockaddr_in6 *)addr;
5121 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5122 			    sin6->sin6_addr.s6_addr32[1] +
5123 			    sin6->sin6_addr.s6_addr32[2] +
5124 			    sin6->sin6_addr.s6_addr32[3]);
5125 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5126 			return (hash_of_addr);
5127 		}
5128 #endif
5129 	default:
5130 		break;
5131 	}
5132 	return (0);
5133 }
5134 
5135 struct sctp_ifa *
5136 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5137 {
5138 	struct sctp_ifa *sctp_ifap;
5139 	struct sctp_vrf *vrf;
5140 	struct sctp_ifalist *hash_head;
5141 	uint32_t hash_of_addr;
5142 
5143 	if (holds_lock == 0)
5144 		SCTP_IPI_ADDR_RLOCK();
5145 
5146 	vrf = sctp_find_vrf(vrf_id);
5147 	if (vrf == NULL) {
5148 		if (holds_lock == 0)
5149 			SCTP_IPI_ADDR_RUNLOCK();
5150 		return (NULL);
5151 	}
5152 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5153 
5154 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5155 	if (hash_head == NULL) {
5156 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5157 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5158 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5159 		sctp_print_address(addr);
5160 		SCTP_PRINTF("No such bucket for address\n");
5161 		if (holds_lock == 0)
5162 			SCTP_IPI_ADDR_RUNLOCK();
5163 
5164 		return (NULL);
5165 	}
5166 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5167 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5168 			continue;
5169 #ifdef INET
5170 		if (addr->sa_family == AF_INET) {
5171 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5172 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5173 				/* found him. */
5174 				if (holds_lock == 0)
5175 					SCTP_IPI_ADDR_RUNLOCK();
5176 				return (sctp_ifap);
5177 				break;
5178 			}
5179 		}
5180 #endif
5181 #ifdef INET6
5182 		if (addr->sa_family == AF_INET6) {
5183 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5184 			    &sctp_ifap->address.sin6)) {
5185 				/* found him. */
5186 				if (holds_lock == 0)
5187 					SCTP_IPI_ADDR_RUNLOCK();
5188 				return (sctp_ifap);
5189 				break;
5190 			}
5191 		}
5192 #endif
5193 	}
5194 	if (holds_lock == 0)
5195 		SCTP_IPI_ADDR_RUNLOCK();
5196 	return (NULL);
5197 }
5198 
5199 static void
5200 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
5201     uint32_t rwnd_req)
5202 {
5203 	/* User pulled some data, do we need a rwnd update? */
5204 	int r_unlocked = 0;
5205 	uint32_t dif, rwnd;
5206 	struct socket *so = NULL;
5207 
5208 	if (stcb == NULL)
5209 		return;
5210 
5211 	atomic_add_int(&stcb->asoc.refcnt, 1);
5212 
5213 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5214 	    SCTP_STATE_SHUTDOWN_RECEIVED |
5215 	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5216 		/* Pre-check If we are freeing no update */
5217 		goto no_lock;
5218 	}
5219 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5220 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5221 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5222 		goto out;
5223 	}
5224 	so = stcb->sctp_socket;
5225 	if (so == NULL) {
5226 		goto out;
5227 	}
5228 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5229 	/* Have you have freed enough to look */
5230 	*freed_so_far = 0;
5231 	/* Yep, its worth a look and the lock overhead */
5232 
5233 	/* Figure out what the rwnd would be */
5234 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5235 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5236 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5237 	} else {
5238 		dif = 0;
5239 	}
5240 	if (dif >= rwnd_req) {
5241 		if (hold_rlock) {
5242 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5243 			r_unlocked = 1;
5244 		}
5245 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5246 			/*
5247 			 * One last check before we allow the guy possibly
5248 			 * to get in. There is a race, where the guy has not
5249 			 * reached the gate. In that case
5250 			 */
5251 			goto out;
5252 		}
5253 		SCTP_TCB_LOCK(stcb);
5254 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5255 			/* No reports here */
5256 			SCTP_TCB_UNLOCK(stcb);
5257 			goto out;
5258 		}
5259 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5260 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5261 
5262 		sctp_chunk_output(stcb->sctp_ep, stcb,
5263 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5264 		/* make sure no timer is running */
5265 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5266 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5267 		SCTP_TCB_UNLOCK(stcb);
5268 	} else {
5269 		/* Update how much we have pending */
5270 		stcb->freed_by_sorcv_sincelast = dif;
5271 	}
5272 out:
5273 	if (so && r_unlocked && hold_rlock) {
5274 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5275 	}
5276 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5277 no_lock:
5278 	atomic_add_int(&stcb->asoc.refcnt, -1);
5279 	return;
5280 }
5281 
5282 int
5283 sctp_sorecvmsg(struct socket *so,
5284     struct uio *uio,
5285     struct mbuf **mp,
5286     struct sockaddr *from,
5287     int fromlen,
5288     int *msg_flags,
5289     struct sctp_sndrcvinfo *sinfo,
5290     int filling_sinfo)
5291 {
5292 	/*
5293 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5294 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5295 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5296 	 * On the way out we may send out any combination of:
5297 	 * MSG_NOTIFICATION MSG_EOR
5298 	 *
5299 	 */
5300 	struct sctp_inpcb *inp = NULL;
5301 	int my_len = 0;
5302 	int cp_len = 0, error = 0;
5303 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5304 	struct mbuf *m = NULL;
5305 	struct sctp_tcb *stcb = NULL;
5306 	int wakeup_read_socket = 0;
5307 	int freecnt_applied = 0;
5308 	int out_flags = 0, in_flags = 0;
5309 	int block_allowed = 1;
5310 	uint32_t freed_so_far = 0;
5311 	uint32_t copied_so_far = 0;
5312 	int in_eeor_mode = 0;
5313 	int no_rcv_needed = 0;
5314 	uint32_t rwnd_req = 0;
5315 	int hold_sblock = 0;
5316 	int hold_rlock = 0;
5317 	ssize_t slen = 0;
5318 	uint32_t held_length = 0;
5319 	int sockbuf_lock = 0;
5320 
5321 	if (uio == NULL) {
5322 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5323 		return (EINVAL);
5324 	}
5325 	if (msg_flags) {
5326 		in_flags = *msg_flags;
5327 		if (in_flags & MSG_PEEK)
5328 			SCTP_STAT_INCR(sctps_read_peeks);
5329 	} else {
5330 		in_flags = 0;
5331 	}
5332 	slen = uio->uio_resid;
5333 
5334 	/* Pull in and set up our int flags */
5335 	if (in_flags & MSG_OOB) {
5336 		/* Out of band's NOT supported */
5337 		return (EOPNOTSUPP);
5338 	}
5339 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5340 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5341 		return (EINVAL);
5342 	}
5343 	if ((in_flags & (MSG_DONTWAIT
5344 	    | MSG_NBIO
5345 	    )) ||
5346 	    SCTP_SO_IS_NBIO(so)) {
5347 		block_allowed = 0;
5348 	}
5349 	/* setup the endpoint */
5350 	inp = (struct sctp_inpcb *)so->so_pcb;
5351 	if (inp == NULL) {
5352 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5353 		return (EFAULT);
5354 	}
5355 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5356 	/* Must be at least a MTU's worth */
5357 	if (rwnd_req < SCTP_MIN_RWND)
5358 		rwnd_req = SCTP_MIN_RWND;
5359 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5360 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5361 		sctp_misc_ints(SCTP_SORECV_ENTER,
5362 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t) uio->uio_resid);
5363 	}
5364 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5365 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5366 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t) uio->uio_resid);
5367 	}
5368 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5369 	if (error) {
5370 		goto release_unlocked;
5371 	}
5372 	sockbuf_lock = 1;
5373 restart:
5374 
5375 
5376 restart_nosblocks:
5377 	if (hold_sblock == 0) {
5378 		SOCKBUF_LOCK(&so->so_rcv);
5379 		hold_sblock = 1;
5380 	}
5381 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5382 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5383 		goto out;
5384 	}
5385 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5386 		if (so->so_error) {
5387 			error = so->so_error;
5388 			if ((in_flags & MSG_PEEK) == 0)
5389 				so->so_error = 0;
5390 			goto out;
5391 		} else {
5392 			if (so->so_rcv.sb_cc == 0) {
5393 				/* indicate EOF */
5394 				error = 0;
5395 				goto out;
5396 			}
5397 		}
5398 	}
5399 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5400 		/* we need to wait for data */
5401 		if ((so->so_rcv.sb_cc == 0) &&
5402 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5403 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5404 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5405 				/*
5406 				 * For active open side clear flags for
5407 				 * re-use passive open is blocked by
5408 				 * connect.
5409 				 */
5410 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5411 					/*
5412 					 * You were aborted, passive side
5413 					 * always hits here
5414 					 */
5415 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5416 					error = ECONNRESET;
5417 				}
5418 				so->so_state &= ~(SS_ISCONNECTING |
5419 				    SS_ISDISCONNECTING |
5420 				    SS_ISCONFIRMING |
5421 				    SS_ISCONNECTED);
5422 				if (error == 0) {
5423 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5424 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5425 						error = ENOTCONN;
5426 					}
5427 				}
5428 				goto out;
5429 			}
5430 		}
5431 		error = sbwait(&so->so_rcv);
5432 		if (error) {
5433 			goto out;
5434 		}
5435 		held_length = 0;
5436 		goto restart_nosblocks;
5437 	} else if (so->so_rcv.sb_cc == 0) {
5438 		if (so->so_error) {
5439 			error = so->so_error;
5440 			if ((in_flags & MSG_PEEK) == 0)
5441 				so->so_error = 0;
5442 		} else {
5443 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5444 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5445 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5446 					/*
5447 					 * For active open side clear flags
5448 					 * for re-use passive open is
5449 					 * blocked by connect.
5450 					 */
5451 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5452 						/*
5453 						 * You were aborted, passive
5454 						 * side always hits here
5455 						 */
5456 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5457 						error = ECONNRESET;
5458 					}
5459 					so->so_state &= ~(SS_ISCONNECTING |
5460 					    SS_ISDISCONNECTING |
5461 					    SS_ISCONFIRMING |
5462 					    SS_ISCONNECTED);
5463 					if (error == 0) {
5464 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5465 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5466 							error = ENOTCONN;
5467 						}
5468 					}
5469 					goto out;
5470 				}
5471 			}
5472 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5473 			error = EWOULDBLOCK;
5474 		}
5475 		goto out;
5476 	}
5477 	if (hold_sblock == 1) {
5478 		SOCKBUF_UNLOCK(&so->so_rcv);
5479 		hold_sblock = 0;
5480 	}
5481 	/* we possibly have data we can read */
5482 	/* sa_ignore FREED_MEMORY */
5483 	control = TAILQ_FIRST(&inp->read_queue);
5484 	if (control == NULL) {
5485 		/*
5486 		 * This could be happening since the appender did the
5487 		 * increment but as not yet did the tailq insert onto the
5488 		 * read_queue
5489 		 */
5490 		if (hold_rlock == 0) {
5491 			SCTP_INP_READ_LOCK(inp);
5492 		}
5493 		control = TAILQ_FIRST(&inp->read_queue);
5494 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5495 #ifdef INVARIANTS
5496 			panic("Huh, its non zero and nothing on control?");
5497 #endif
5498 			so->so_rcv.sb_cc = 0;
5499 		}
5500 		SCTP_INP_READ_UNLOCK(inp);
5501 		hold_rlock = 0;
5502 		goto restart;
5503 	}
5504 	if ((control->length == 0) &&
5505 	    (control->do_not_ref_stcb)) {
5506 		/*
5507 		 * Clean up code for freeing assoc that left behind a
5508 		 * pdapi.. maybe a peer in EEOR that just closed after
5509 		 * sending and never indicated a EOR.
5510 		 */
5511 		if (hold_rlock == 0) {
5512 			hold_rlock = 1;
5513 			SCTP_INP_READ_LOCK(inp);
5514 		}
5515 		control->held_length = 0;
5516 		if (control->data) {
5517 			/* Hmm there is data here .. fix */
5518 			struct mbuf *m_tmp;
5519 			int cnt = 0;
5520 
5521 			m_tmp = control->data;
5522 			while (m_tmp) {
5523 				cnt += SCTP_BUF_LEN(m_tmp);
5524 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5525 					control->tail_mbuf = m_tmp;
5526 					control->end_added = 1;
5527 				}
5528 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5529 			}
5530 			control->length = cnt;
5531 		} else {
5532 			/* remove it */
5533 			TAILQ_REMOVE(&inp->read_queue, control, next);
5534 			/* Add back any hiddend data */
5535 			sctp_free_remote_addr(control->whoFrom);
5536 			sctp_free_a_readq(stcb, control);
5537 		}
5538 		if (hold_rlock) {
5539 			hold_rlock = 0;
5540 			SCTP_INP_READ_UNLOCK(inp);
5541 		}
5542 		goto restart;
5543 	}
5544 	if ((control->length == 0) &&
5545 	    (control->end_added == 1)) {
5546 		/*
5547 		 * Do we also need to check for (control->pdapi_aborted ==
5548 		 * 1)?
5549 		 */
5550 		if (hold_rlock == 0) {
5551 			hold_rlock = 1;
5552 			SCTP_INP_READ_LOCK(inp);
5553 		}
5554 		TAILQ_REMOVE(&inp->read_queue, control, next);
5555 		if (control->data) {
5556 #ifdef INVARIANTS
5557 			panic("control->data not null but control->length == 0");
5558 #else
5559 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5560 			sctp_m_freem(control->data);
5561 			control->data = NULL;
5562 #endif
5563 		}
5564 		if (control->aux_data) {
5565 			sctp_m_free(control->aux_data);
5566 			control->aux_data = NULL;
5567 		}
5568 		if (control->on_strm_q) {
5569 			panic("About to free ctl:%p so:%p and its in %d",
5570 			    control, so, control->on_strm_q);
5571 		}
5572 		sctp_free_remote_addr(control->whoFrom);
5573 		sctp_free_a_readq(stcb, control);
5574 		if (hold_rlock) {
5575 			hold_rlock = 0;
5576 			SCTP_INP_READ_UNLOCK(inp);
5577 		}
5578 		goto restart;
5579 	}
5580 	if (control->length == 0) {
5581 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5582 		    (filling_sinfo)) {
5583 			/* find a more suitable one then this */
5584 			ctl = TAILQ_NEXT(control, next);
5585 			while (ctl) {
5586 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5587 				    (ctl->some_taken ||
5588 				    (ctl->spec_flags & M_NOTIFICATION) ||
5589 				    ((ctl->do_not_ref_stcb == 0) &&
5590 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5591 				    ) {
5592 					/*-
5593 					 * If we have a different TCB next, and there is data
5594 					 * present. If we have already taken some (pdapi), OR we can
5595 					 * ref the tcb and no delivery as started on this stream, we
5596 					 * take it. Note we allow a notification on a different
5597 					 * assoc to be delivered..
5598 					 */
5599 					control = ctl;
5600 					goto found_one;
5601 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5602 					    (ctl->length) &&
5603 					    ((ctl->some_taken) ||
5604 					    ((ctl->do_not_ref_stcb == 0) &&
5605 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5606 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5607 					/*-
5608 					 * If we have the same tcb, and there is data present, and we
5609 					 * have the strm interleave feature present. Then if we have
5610 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5611 					 * not started a delivery for this stream, we can take it.
5612 					 * Note we do NOT allow a notificaiton on the same assoc to
5613 					 * be delivered.
5614 					 */
5615 					control = ctl;
5616 					goto found_one;
5617 				}
5618 				ctl = TAILQ_NEXT(ctl, next);
5619 			}
5620 		}
5621 		/*
5622 		 * if we reach here, not suitable replacement is available
5623 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5624 		 * into the our held count, and its time to sleep again.
5625 		 */
5626 		held_length = so->so_rcv.sb_cc;
5627 		control->held_length = so->so_rcv.sb_cc;
5628 		goto restart;
5629 	}
5630 	/* Clear the held length since there is something to read */
5631 	control->held_length = 0;
5632 	if (hold_rlock) {
5633 		SCTP_INP_READ_UNLOCK(inp);
5634 		hold_rlock = 0;
5635 	}
5636 found_one:
5637 	/*
5638 	 * If we reach here, control has a some data for us to read off.
5639 	 * Note that stcb COULD be NULL.
5640 	 */
5641 	control->some_taken++;
5642 	if (hold_sblock) {
5643 		SOCKBUF_UNLOCK(&so->so_rcv);
5644 		hold_sblock = 0;
5645 	}
5646 	stcb = control->stcb;
5647 	if (stcb) {
5648 		if ((control->do_not_ref_stcb == 0) &&
5649 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5650 			if (freecnt_applied == 0)
5651 				stcb = NULL;
5652 		} else if (control->do_not_ref_stcb == 0) {
5653 			/* you can't free it on me please */
5654 			/*
5655 			 * The lock on the socket buffer protects us so the
5656 			 * free code will stop. But since we used the
5657 			 * socketbuf lock and the sender uses the tcb_lock
5658 			 * to increment, we need to use the atomic add to
5659 			 * the refcnt
5660 			 */
5661 			if (freecnt_applied) {
5662 #ifdef INVARIANTS
5663 				panic("refcnt already incremented");
5664 #else
5665 				SCTP_PRINTF("refcnt already incremented?\n");
5666 #endif
5667 			} else {
5668 				atomic_add_int(&stcb->asoc.refcnt, 1);
5669 				freecnt_applied = 1;
5670 			}
5671 			/*
5672 			 * Setup to remember how much we have not yet told
5673 			 * the peer our rwnd has opened up. Note we grab the
5674 			 * value from the tcb from last time. Note too that
5675 			 * sack sending clears this when a sack is sent,
5676 			 * which is fine. Once we hit the rwnd_req, we then
5677 			 * will go to the sctp_user_rcvd() that will not
5678 			 * lock until it KNOWs it MUST send a WUP-SACK.
5679 			 */
5680 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5681 			stcb->freed_by_sorcv_sincelast = 0;
5682 		}
5683 	}
5684 	if (stcb &&
5685 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5686 	    control->do_not_ref_stcb == 0) {
5687 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5688 	}
5689 	/* First lets get off the sinfo and sockaddr info */
5690 	if ((sinfo) && filling_sinfo) {
5691 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5692 		nxt = TAILQ_NEXT(control, next);
5693 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5694 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5695 			struct sctp_extrcvinfo *s_extra;
5696 
5697 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5698 			if ((nxt) &&
5699 			    (nxt->length)) {
5700 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5701 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5702 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5703 				}
5704 				if (nxt->spec_flags & M_NOTIFICATION) {
5705 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5706 				}
5707 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5708 				s_extra->serinfo_next_length = nxt->length;
5709 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5710 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5711 				if (nxt->tail_mbuf != NULL) {
5712 					if (nxt->end_added) {
5713 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5714 					}
5715 				}
5716 			} else {
5717 				/*
5718 				 * we explicitly 0 this, since the memcpy
5719 				 * got some other things beyond the older
5720 				 * sinfo_ that is on the control's structure
5721 				 * :-D
5722 				 */
5723 				nxt = NULL;
5724 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5725 				s_extra->serinfo_next_aid = 0;
5726 				s_extra->serinfo_next_length = 0;
5727 				s_extra->serinfo_next_ppid = 0;
5728 				s_extra->serinfo_next_stream = 0;
5729 			}
5730 		}
5731 		/*
5732 		 * update off the real current cum-ack, if we have an stcb.
5733 		 */
5734 		if ((control->do_not_ref_stcb == 0) && stcb)
5735 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5736 		/*
5737 		 * mask off the high bits, we keep the actual chunk bits in
5738 		 * there.
5739 		 */
5740 		sinfo->sinfo_flags &= 0x00ff;
5741 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5742 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5743 		}
5744 	}
5745 #ifdef SCTP_ASOCLOG_OF_TSNS
5746 	{
5747 		int index, newindex;
5748 		struct sctp_pcbtsn_rlog *entry;
5749 
5750 		do {
5751 			index = inp->readlog_index;
5752 			newindex = index + 1;
5753 			if (newindex >= SCTP_READ_LOG_SIZE) {
5754 				newindex = 0;
5755 			}
5756 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5757 		entry = &inp->readlog[index];
5758 		entry->vtag = control->sinfo_assoc_id;
5759 		entry->strm = control->sinfo_stream;
5760 		entry->seq = control->sinfo_ssn;
5761 		entry->sz = control->length;
5762 		entry->flgs = control->sinfo_flags;
5763 	}
5764 #endif
5765 	if ((fromlen > 0) && (from != NULL)) {
5766 		union sctp_sockstore store;
5767 		size_t len;
5768 
5769 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5770 #ifdef INET6
5771 		case AF_INET6:
5772 			len = sizeof(struct sockaddr_in6);
5773 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5774 			store.sin6.sin6_port = control->port_from;
5775 			break;
5776 #endif
5777 #ifdef INET
5778 		case AF_INET:
5779 #ifdef INET6
5780 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5781 				len = sizeof(struct sockaddr_in6);
5782 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5783 				    &store.sin6);
5784 				store.sin6.sin6_port = control->port_from;
5785 			} else {
5786 				len = sizeof(struct sockaddr_in);
5787 				store.sin = control->whoFrom->ro._l_addr.sin;
5788 				store.sin.sin_port = control->port_from;
5789 			}
5790 #else
5791 			len = sizeof(struct sockaddr_in);
5792 			store.sin = control->whoFrom->ro._l_addr.sin;
5793 			store.sin.sin_port = control->port_from;
5794 #endif
5795 			break;
5796 #endif
5797 		default:
5798 			len = 0;
5799 			break;
5800 		}
5801 		memcpy(from, &store, min((size_t)fromlen, len));
5802 #ifdef INET6
5803 		{
5804 			struct sockaddr_in6 lsa6, *from6;
5805 
5806 			from6 = (struct sockaddr_in6 *)from;
5807 			sctp_recover_scope_mac(from6, (&lsa6));
5808 		}
5809 #endif
5810 	}
5811 	/* now copy out what data we can */
5812 	if (mp == NULL) {
5813 		/* copy out each mbuf in the chain up to length */
5814 get_more_data:
5815 		m = control->data;
5816 		while (m) {
5817 			/* Move out all we can */
5818 			cp_len = (int)uio->uio_resid;
5819 			my_len = (int)SCTP_BUF_LEN(m);
5820 			if (cp_len > my_len) {
5821 				/* not enough in this buf */
5822 				cp_len = my_len;
5823 			}
5824 			if (hold_rlock) {
5825 				SCTP_INP_READ_UNLOCK(inp);
5826 				hold_rlock = 0;
5827 			}
5828 			if (cp_len > 0)
5829 				error = uiomove(mtod(m, char *), cp_len, uio);
5830 			/* re-read */
5831 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5832 				goto release;
5833 			}
5834 			if ((control->do_not_ref_stcb == 0) && stcb &&
5835 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5836 				no_rcv_needed = 1;
5837 			}
5838 			if (error) {
5839 				/* error we are out of here */
5840 				goto release;
5841 			}
5842 			SCTP_INP_READ_LOCK(inp);
5843 			hold_rlock = 1;
5844 			if (cp_len == SCTP_BUF_LEN(m)) {
5845 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5846 				    (control->end_added)) {
5847 					out_flags |= MSG_EOR;
5848 					if ((control->do_not_ref_stcb == 0) &&
5849 					    (control->stcb != NULL) &&
5850 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5851 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5852 				}
5853 				if (control->spec_flags & M_NOTIFICATION) {
5854 					out_flags |= MSG_NOTIFICATION;
5855 				}
5856 				/* we ate up the mbuf */
5857 				if (in_flags & MSG_PEEK) {
5858 					/* just looking */
5859 					m = SCTP_BUF_NEXT(m);
5860 					copied_so_far += cp_len;
5861 				} else {
5862 					/* dispose of the mbuf */
5863 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5864 						sctp_sblog(&so->so_rcv,
5865 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5866 					}
5867 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5868 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5869 						sctp_sblog(&so->so_rcv,
5870 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5871 					}
5872 					copied_so_far += cp_len;
5873 					freed_so_far += cp_len;
5874 					freed_so_far += MSIZE;
5875 					atomic_subtract_int(&control->length, cp_len);
5876 					control->data = sctp_m_free(m);
5877 					m = control->data;
5878 					/*
5879 					 * been through it all, must hold sb
5880 					 * lock ok to null tail
5881 					 */
5882 					if (control->data == NULL) {
5883 #ifdef INVARIANTS
5884 						if ((control->end_added == 0) ||
5885 						    (TAILQ_NEXT(control, next) == NULL)) {
5886 							/*
5887 							 * If the end is not
5888 							 * added, OR the
5889 							 * next is NOT null
5890 							 * we MUST have the
5891 							 * lock.
5892 							 */
5893 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5894 								panic("Hmm we don't own the lock?");
5895 							}
5896 						}
5897 #endif
5898 						control->tail_mbuf = NULL;
5899 #ifdef INVARIANTS
5900 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5901 							panic("end_added, nothing left and no MSG_EOR");
5902 						}
5903 #endif
5904 					}
5905 				}
5906 			} else {
5907 				/* Do we need to trim the mbuf? */
5908 				if (control->spec_flags & M_NOTIFICATION) {
5909 					out_flags |= MSG_NOTIFICATION;
5910 				}
5911 				if ((in_flags & MSG_PEEK) == 0) {
5912 					SCTP_BUF_RESV_UF(m, cp_len);
5913 					SCTP_BUF_LEN(m) -= cp_len;
5914 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5915 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5916 					}
5917 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5918 					if ((control->do_not_ref_stcb == 0) &&
5919 					    stcb) {
5920 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5921 					}
5922 					copied_so_far += cp_len;
5923 					freed_so_far += cp_len;
5924 					freed_so_far += MSIZE;
5925 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5926 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5927 						    SCTP_LOG_SBRESULT, 0);
5928 					}
5929 					atomic_subtract_int(&control->length, cp_len);
5930 				} else {
5931 					copied_so_far += cp_len;
5932 				}
5933 			}
5934 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5935 				break;
5936 			}
5937 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5938 			    (control->do_not_ref_stcb == 0) &&
5939 			    (freed_so_far >= rwnd_req)) {
5940 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5941 			}
5942 		}		/* end while(m) */
5943 		/*
5944 		 * At this point we have looked at it all and we either have
5945 		 * a MSG_EOR/or read all the user wants... <OR>
5946 		 * control->length == 0.
5947 		 */
5948 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5949 			/* we are done with this control */
5950 			if (control->length == 0) {
5951 				if (control->data) {
5952 #ifdef INVARIANTS
5953 					panic("control->data not null at read eor?");
5954 #else
5955 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5956 					sctp_m_freem(control->data);
5957 					control->data = NULL;
5958 #endif
5959 				}
5960 		done_with_control:
5961 				if (hold_rlock == 0) {
5962 					SCTP_INP_READ_LOCK(inp);
5963 					hold_rlock = 1;
5964 				}
5965 				TAILQ_REMOVE(&inp->read_queue, control, next);
5966 				/* Add back any hiddend data */
5967 				if (control->held_length) {
5968 					held_length = 0;
5969 					control->held_length = 0;
5970 					wakeup_read_socket = 1;
5971 				}
5972 				if (control->aux_data) {
5973 					sctp_m_free(control->aux_data);
5974 					control->aux_data = NULL;
5975 				}
5976 				no_rcv_needed = control->do_not_ref_stcb;
5977 				sctp_free_remote_addr(control->whoFrom);
5978 				control->data = NULL;
5979 				if (control->on_strm_q) {
5980 					panic("About to free ctl:%p so:%p and its in %d",
5981 					    control, so, control->on_strm_q);
5982 				}
5983 				sctp_free_a_readq(stcb, control);
5984 				control = NULL;
5985 				if ((freed_so_far >= rwnd_req) &&
5986 				    (no_rcv_needed == 0))
5987 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5988 
5989 			} else {
5990 				/*
5991 				 * The user did not read all of this
5992 				 * message, turn off the returned MSG_EOR
5993 				 * since we are leaving more behind on the
5994 				 * control to read.
5995 				 */
5996 #ifdef INVARIANTS
5997 				if (control->end_added &&
5998 				    (control->data == NULL) &&
5999 				    (control->tail_mbuf == NULL)) {
6000 					panic("Gak, control->length is corrupt?");
6001 				}
6002 #endif
6003 				no_rcv_needed = control->do_not_ref_stcb;
6004 				out_flags &= ~MSG_EOR;
6005 			}
6006 		}
6007 		if (out_flags & MSG_EOR) {
6008 			goto release;
6009 		}
6010 		if ((uio->uio_resid == 0) ||
6011 		    ((in_eeor_mode) &&
6012 		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
6013 			goto release;
6014 		}
6015 		/*
6016 		 * If I hit here the receiver wants more and this message is
6017 		 * NOT done (pd-api). So two questions. Can we block? if not
6018 		 * we are done. Did the user NOT set MSG_WAITALL?
6019 		 */
6020 		if (block_allowed == 0) {
6021 			goto release;
6022 		}
6023 		/*
6024 		 * We need to wait for more data a few things: - We don't
6025 		 * sbunlock() so we don't get someone else reading. - We
6026 		 * must be sure to account for the case where what is added
6027 		 * is NOT to our control when we wakeup.
6028 		 */
6029 
6030 		/*
6031 		 * Do we need to tell the transport a rwnd update might be
6032 		 * needed before we go to sleep?
6033 		 */
6034 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6035 		    ((freed_so_far >= rwnd_req) &&
6036 		    (control->do_not_ref_stcb == 0) &&
6037 		    (no_rcv_needed == 0))) {
6038 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6039 		}
6040 wait_some_more:
6041 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6042 			goto release;
6043 		}
6044 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6045 			goto release;
6046 
6047 		if (hold_rlock == 1) {
6048 			SCTP_INP_READ_UNLOCK(inp);
6049 			hold_rlock = 0;
6050 		}
6051 		if (hold_sblock == 0) {
6052 			SOCKBUF_LOCK(&so->so_rcv);
6053 			hold_sblock = 1;
6054 		}
6055 		if ((copied_so_far) && (control->length == 0) &&
6056 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6057 			goto release;
6058 		}
6059 		if (so->so_rcv.sb_cc <= control->held_length) {
6060 			error = sbwait(&so->so_rcv);
6061 			if (error) {
6062 				goto release;
6063 			}
6064 			control->held_length = 0;
6065 		}
6066 		if (hold_sblock) {
6067 			SOCKBUF_UNLOCK(&so->so_rcv);
6068 			hold_sblock = 0;
6069 		}
6070 		if (control->length == 0) {
6071 			/* still nothing here */
6072 			if (control->end_added == 1) {
6073 				/* he aborted, or is done i.e.did a shutdown */
6074 				out_flags |= MSG_EOR;
6075 				if (control->pdapi_aborted) {
6076 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6077 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6078 
6079 					out_flags |= MSG_TRUNC;
6080 				} else {
6081 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6082 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6083 				}
6084 				goto done_with_control;
6085 			}
6086 			if (so->so_rcv.sb_cc > held_length) {
6087 				control->held_length = so->so_rcv.sb_cc;
6088 				held_length = 0;
6089 			}
6090 			goto wait_some_more;
6091 		} else if (control->data == NULL) {
6092 			/*
6093 			 * we must re-sync since data is probably being
6094 			 * added
6095 			 */
6096 			SCTP_INP_READ_LOCK(inp);
6097 			if ((control->length > 0) && (control->data == NULL)) {
6098 				/*
6099 				 * big trouble.. we have the lock and its
6100 				 * corrupt?
6101 				 */
6102 #ifdef INVARIANTS
6103 				panic("Impossible data==NULL length !=0");
6104 #endif
6105 				out_flags |= MSG_EOR;
6106 				out_flags |= MSG_TRUNC;
6107 				control->length = 0;
6108 				SCTP_INP_READ_UNLOCK(inp);
6109 				goto done_with_control;
6110 			}
6111 			SCTP_INP_READ_UNLOCK(inp);
6112 			/* We will fall around to get more data */
6113 		}
6114 		goto get_more_data;
6115 	} else {
6116 		/*-
6117 		 * Give caller back the mbuf chain,
6118 		 * store in uio_resid the length
6119 		 */
6120 		wakeup_read_socket = 0;
6121 		if ((control->end_added == 0) ||
6122 		    (TAILQ_NEXT(control, next) == NULL)) {
6123 			/* Need to get rlock */
6124 			if (hold_rlock == 0) {
6125 				SCTP_INP_READ_LOCK(inp);
6126 				hold_rlock = 1;
6127 			}
6128 		}
6129 		if (control->end_added) {
6130 			out_flags |= MSG_EOR;
6131 			if ((control->do_not_ref_stcb == 0) &&
6132 			    (control->stcb != NULL) &&
6133 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6134 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6135 		}
6136 		if (control->spec_flags & M_NOTIFICATION) {
6137 			out_flags |= MSG_NOTIFICATION;
6138 		}
6139 		uio->uio_resid = control->length;
6140 		*mp = control->data;
6141 		m = control->data;
6142 		while (m) {
6143 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6144 				sctp_sblog(&so->so_rcv,
6145 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6146 			}
6147 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6148 			freed_so_far += SCTP_BUF_LEN(m);
6149 			freed_so_far += MSIZE;
6150 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6151 				sctp_sblog(&so->so_rcv,
6152 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6153 			}
6154 			m = SCTP_BUF_NEXT(m);
6155 		}
6156 		control->data = control->tail_mbuf = NULL;
6157 		control->length = 0;
6158 		if (out_flags & MSG_EOR) {
6159 			/* Done with this control */
6160 			goto done_with_control;
6161 		}
6162 	}
6163 release:
6164 	if (hold_rlock == 1) {
6165 		SCTP_INP_READ_UNLOCK(inp);
6166 		hold_rlock = 0;
6167 	}
6168 	if (hold_sblock == 1) {
6169 		SOCKBUF_UNLOCK(&so->so_rcv);
6170 		hold_sblock = 0;
6171 	}
6172 	sbunlock(&so->so_rcv);
6173 	sockbuf_lock = 0;
6174 
6175 release_unlocked:
6176 	if (hold_sblock) {
6177 		SOCKBUF_UNLOCK(&so->so_rcv);
6178 		hold_sblock = 0;
6179 	}
6180 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6181 		if ((freed_so_far >= rwnd_req) &&
6182 		    (control && (control->do_not_ref_stcb == 0)) &&
6183 		    (no_rcv_needed == 0))
6184 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6185 	}
6186 out:
6187 	if (msg_flags) {
6188 		*msg_flags = out_flags;
6189 	}
6190 	if (((out_flags & MSG_EOR) == 0) &&
6191 	    ((in_flags & MSG_PEEK) == 0) &&
6192 	    (sinfo) &&
6193 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6194 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6195 		struct sctp_extrcvinfo *s_extra;
6196 
6197 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6198 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6199 	}
6200 	if (hold_rlock == 1) {
6201 		SCTP_INP_READ_UNLOCK(inp);
6202 	}
6203 	if (hold_sblock) {
6204 		SOCKBUF_UNLOCK(&so->so_rcv);
6205 	}
6206 	if (sockbuf_lock) {
6207 		sbunlock(&so->so_rcv);
6208 	}
6209 	if (freecnt_applied) {
6210 		/*
6211 		 * The lock on the socket buffer protects us so the free
6212 		 * code will stop. But since we used the socketbuf lock and
6213 		 * the sender uses the tcb_lock to increment, we need to use
6214 		 * the atomic add to the refcnt.
6215 		 */
6216 		if (stcb == NULL) {
6217 #ifdef INVARIANTS
6218 			panic("stcb for refcnt has gone NULL?");
6219 			goto stage_left;
6220 #else
6221 			goto stage_left;
6222 #endif
6223 		}
6224 		atomic_add_int(&stcb->asoc.refcnt, -1);
6225 		/* Save the value back for next time */
6226 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6227 	}
6228 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6229 		if (stcb) {
6230 			sctp_misc_ints(SCTP_SORECV_DONE,
6231 			    freed_so_far,
6232 			    (uint32_t) ((uio) ? (slen - uio->uio_resid) : slen),
6233 			    stcb->asoc.my_rwnd,
6234 			    so->so_rcv.sb_cc);
6235 		} else {
6236 			sctp_misc_ints(SCTP_SORECV_DONE,
6237 			    freed_so_far,
6238 			    (uint32_t) ((uio) ? (slen - uio->uio_resid) : slen),
6239 			    0,
6240 			    so->so_rcv.sb_cc);
6241 		}
6242 	}
6243 stage_left:
6244 	if (wakeup_read_socket) {
6245 		sctp_sorwakeup(inp, so);
6246 	}
6247 	return (error);
6248 }
6249 
6250 
6251 #ifdef SCTP_MBUF_LOGGING
6252 struct mbuf *
6253 sctp_m_free(struct mbuf *m)
6254 {
6255 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6256 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6257 	}
6258 	return (m_free(m));
6259 }
6260 
6261 void
6262 sctp_m_freem(struct mbuf *mb)
6263 {
6264 	while (mb != NULL)
6265 		mb = sctp_m_free(mb);
6266 }
6267 
6268 #endif
6269 
6270 int
6271 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6272 {
6273 	/*
6274 	 * Given a local address. For all associations that holds the
6275 	 * address, request a peer-set-primary.
6276 	 */
6277 	struct sctp_ifa *ifa;
6278 	struct sctp_laddr *wi;
6279 
6280 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6281 	if (ifa == NULL) {
6282 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6283 		return (EADDRNOTAVAIL);
6284 	}
6285 	/*
6286 	 * Now that we have the ifa we must awaken the iterator with this
6287 	 * message.
6288 	 */
6289 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6290 	if (wi == NULL) {
6291 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6292 		return (ENOMEM);
6293 	}
6294 	/* Now incr the count and int wi structure */
6295 	SCTP_INCR_LADDR_COUNT();
6296 	bzero(wi, sizeof(*wi));
6297 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6298 	wi->ifa = ifa;
6299 	wi->action = SCTP_SET_PRIM_ADDR;
6300 	atomic_add_int(&ifa->refcount, 1);
6301 
6302 	/* Now add it to the work queue */
6303 	SCTP_WQ_ADDR_LOCK();
6304 	/*
6305 	 * Should this really be a tailq? As it is we will process the
6306 	 * newest first :-0
6307 	 */
6308 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6309 	SCTP_WQ_ADDR_UNLOCK();
6310 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6311 	    (struct sctp_inpcb *)NULL,
6312 	    (struct sctp_tcb *)NULL,
6313 	    (struct sctp_nets *)NULL);
6314 	return (0);
6315 }
6316 
6317 
6318 int
6319 sctp_soreceive(struct socket *so,
6320     struct sockaddr **psa,
6321     struct uio *uio,
6322     struct mbuf **mp0,
6323     struct mbuf **controlp,
6324     int *flagsp)
6325 {
6326 	int error, fromlen;
6327 	uint8_t sockbuf[256];
6328 	struct sockaddr *from;
6329 	struct sctp_extrcvinfo sinfo;
6330 	int filling_sinfo = 1;
6331 	struct sctp_inpcb *inp;
6332 
6333 	inp = (struct sctp_inpcb *)so->so_pcb;
6334 	/* pickup the assoc we are reading from */
6335 	if (inp == NULL) {
6336 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6337 		return (EINVAL);
6338 	}
6339 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6340 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6341 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6342 	    (controlp == NULL)) {
6343 		/* user does not want the sndrcv ctl */
6344 		filling_sinfo = 0;
6345 	}
6346 	if (psa) {
6347 		from = (struct sockaddr *)sockbuf;
6348 		fromlen = sizeof(sockbuf);
6349 		from->sa_len = 0;
6350 	} else {
6351 		from = NULL;
6352 		fromlen = 0;
6353 	}
6354 
6355 	if (filling_sinfo) {
6356 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6357 	}
6358 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6359 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6360 	if (controlp != NULL) {
6361 		/* copy back the sinfo in a CMSG format */
6362 		if (filling_sinfo)
6363 			*controlp = sctp_build_ctl_nchunk(inp,
6364 			    (struct sctp_sndrcvinfo *)&sinfo);
6365 		else
6366 			*controlp = NULL;
6367 	}
6368 	if (psa) {
6369 		/* copy back the address info */
6370 		if (from && from->sa_len) {
6371 			*psa = sodupsockaddr(from, M_NOWAIT);
6372 		} else {
6373 			*psa = NULL;
6374 		}
6375 	}
6376 	return (error);
6377 }
6378 
6379 
6380 
6381 
6382 
6383 int
6384 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6385     int totaddr, int *error)
6386 {
6387 	int added = 0;
6388 	int i;
6389 	struct sctp_inpcb *inp;
6390 	struct sockaddr *sa;
6391 	size_t incr = 0;
6392 
6393 #ifdef INET
6394 	struct sockaddr_in *sin;
6395 
6396 #endif
6397 #ifdef INET6
6398 	struct sockaddr_in6 *sin6;
6399 
6400 #endif
6401 
6402 	sa = addr;
6403 	inp = stcb->sctp_ep;
6404 	*error = 0;
6405 	for (i = 0; i < totaddr; i++) {
6406 		switch (sa->sa_family) {
6407 #ifdef INET
6408 		case AF_INET:
6409 			incr = sizeof(struct sockaddr_in);
6410 			sin = (struct sockaddr_in *)sa;
6411 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6412 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6413 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6414 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6415 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6416 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6417 				*error = EINVAL;
6418 				goto out_now;
6419 			}
6420 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6421 				/* assoc gone no un-lock */
6422 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6423 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6424 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6425 				*error = ENOBUFS;
6426 				goto out_now;
6427 			}
6428 			added++;
6429 			break;
6430 #endif
6431 #ifdef INET6
6432 		case AF_INET6:
6433 			incr = sizeof(struct sockaddr_in6);
6434 			sin6 = (struct sockaddr_in6 *)sa;
6435 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6436 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6437 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6438 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6439 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6440 				*error = EINVAL;
6441 				goto out_now;
6442 			}
6443 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6444 				/* assoc gone no un-lock */
6445 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6446 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6447 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6448 				*error = ENOBUFS;
6449 				goto out_now;
6450 			}
6451 			added++;
6452 			break;
6453 #endif
6454 		default:
6455 			break;
6456 		}
6457 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6458 	}
6459 out_now:
6460 	return (added);
6461 }
6462 
6463 struct sctp_tcb *
6464 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6465     unsigned int *totaddr,
6466     unsigned int *num_v4, unsigned int *num_v6, int *error,
6467     unsigned int limit, int *bad_addr)
6468 {
6469 	struct sockaddr *sa;
6470 	struct sctp_tcb *stcb = NULL;
6471 	unsigned int incr, at, i;
6472 
6473 	at = incr = 0;
6474 	sa = addr;
6475 	*error = *num_v6 = *num_v4 = 0;
6476 	/* account and validate addresses */
6477 	for (i = 0; i < *totaddr; i++) {
6478 		switch (sa->sa_family) {
6479 #ifdef INET
6480 		case AF_INET:
6481 			if (sa->sa_len != incr) {
6482 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6483 				*error = EINVAL;
6484 				*bad_addr = 1;
6485 				return (NULL);
6486 			}
6487 			(*num_v4) += 1;
6488 			incr = (unsigned int)sizeof(struct sockaddr_in);
6489 			break;
6490 #endif
6491 #ifdef INET6
6492 		case AF_INET6:
6493 			{
6494 				struct sockaddr_in6 *sin6;
6495 
6496 				sin6 = (struct sockaddr_in6 *)sa;
6497 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6498 					/* Must be non-mapped for connectx */
6499 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6500 					*error = EINVAL;
6501 					*bad_addr = 1;
6502 					return (NULL);
6503 				}
6504 				if (sa->sa_len != incr) {
6505 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6506 					*error = EINVAL;
6507 					*bad_addr = 1;
6508 					return (NULL);
6509 				}
6510 				(*num_v6) += 1;
6511 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6512 				break;
6513 			}
6514 #endif
6515 		default:
6516 			*totaddr = i;
6517 			/* we are done */
6518 			break;
6519 		}
6520 		if (i == *totaddr) {
6521 			break;
6522 		}
6523 		SCTP_INP_INCR_REF(inp);
6524 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6525 		if (stcb != NULL) {
6526 			/* Already have or am bring up an association */
6527 			return (stcb);
6528 		} else {
6529 			SCTP_INP_DECR_REF(inp);
6530 		}
6531 		if ((at + incr) > limit) {
6532 			*totaddr = i;
6533 			break;
6534 		}
6535 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6536 	}
6537 	return ((struct sctp_tcb *)NULL);
6538 }
6539 
6540 /*
6541  * sctp_bindx(ADD) for one address.
6542  * assumes all arguments are valid/checked by caller.
6543  */
6544 void
6545 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6546     struct sockaddr *sa, sctp_assoc_t assoc_id,
6547     uint32_t vrf_id, int *error, void *p)
6548 {
6549 	struct sockaddr *addr_touse;
6550 
6551 #if defined(INET) && defined(INET6)
6552 	struct sockaddr_in sin;
6553 
6554 #endif
6555 
6556 	/* see if we're bound all already! */
6557 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6558 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6559 		*error = EINVAL;
6560 		return;
6561 	}
6562 	addr_touse = sa;
6563 #ifdef INET6
6564 	if (sa->sa_family == AF_INET6) {
6565 #ifdef INET
6566 		struct sockaddr_in6 *sin6;
6567 
6568 #endif
6569 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6570 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6571 			*error = EINVAL;
6572 			return;
6573 		}
6574 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6575 			/* can only bind v6 on PF_INET6 sockets */
6576 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6577 			*error = EINVAL;
6578 			return;
6579 		}
6580 #ifdef INET
6581 		sin6 = (struct sockaddr_in6 *)addr_touse;
6582 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6583 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6584 			    SCTP_IPV6_V6ONLY(inp)) {
6585 				/* can't bind v4-mapped on PF_INET sockets */
6586 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6587 				*error = EINVAL;
6588 				return;
6589 			}
6590 			in6_sin6_2_sin(&sin, sin6);
6591 			addr_touse = (struct sockaddr *)&sin;
6592 		}
6593 #endif
6594 	}
6595 #endif
6596 #ifdef INET
6597 	if (sa->sa_family == AF_INET) {
6598 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6599 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6600 			*error = EINVAL;
6601 			return;
6602 		}
6603 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6604 		    SCTP_IPV6_V6ONLY(inp)) {
6605 			/* can't bind v4 on PF_INET sockets */
6606 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6607 			*error = EINVAL;
6608 			return;
6609 		}
6610 	}
6611 #endif
6612 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6613 		if (p == NULL) {
6614 			/* Can't get proc for Net/Open BSD */
6615 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6616 			*error = EINVAL;
6617 			return;
6618 		}
6619 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6620 		return;
6621 	}
6622 	/*
6623 	 * No locks required here since bind and mgmt_ep_sa all do their own
6624 	 * locking. If we do something for the FIX: below we may need to
6625 	 * lock in that case.
6626 	 */
6627 	if (assoc_id == 0) {
6628 		/* add the address */
6629 		struct sctp_inpcb *lep;
6630 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6631 
6632 		/* validate the incoming port */
6633 		if ((lsin->sin_port != 0) &&
6634 		    (lsin->sin_port != inp->sctp_lport)) {
6635 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6636 			*error = EINVAL;
6637 			return;
6638 		} else {
6639 			/* user specified 0 port, set it to existing port */
6640 			lsin->sin_port = inp->sctp_lport;
6641 		}
6642 
6643 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6644 		if (lep != NULL) {
6645 			/*
6646 			 * We must decrement the refcount since we have the
6647 			 * ep already and are binding. No remove going on
6648 			 * here.
6649 			 */
6650 			SCTP_INP_DECR_REF(lep);
6651 		}
6652 		if (lep == inp) {
6653 			/* already bound to it.. ok */
6654 			return;
6655 		} else if (lep == NULL) {
6656 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6657 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6658 			    SCTP_ADD_IP_ADDRESS,
6659 			    vrf_id, NULL);
6660 		} else {
6661 			*error = EADDRINUSE;
6662 		}
6663 		if (*error)
6664 			return;
6665 	} else {
6666 		/*
6667 		 * FIX: decide whether we allow assoc based bindx
6668 		 */
6669 	}
6670 }
6671 
6672 /*
6673  * sctp_bindx(DELETE) for one address.
6674  * assumes all arguments are valid/checked by caller.
6675  */
6676 void
6677 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6678     struct sockaddr *sa, sctp_assoc_t assoc_id,
6679     uint32_t vrf_id, int *error)
6680 {
6681 	struct sockaddr *addr_touse;
6682 
6683 #if defined(INET) && defined(INET6)
6684 	struct sockaddr_in sin;
6685 
6686 #endif
6687 
6688 	/* see if we're bound all already! */
6689 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6690 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6691 		*error = EINVAL;
6692 		return;
6693 	}
6694 	addr_touse = sa;
6695 #ifdef INET6
6696 	if (sa->sa_family == AF_INET6) {
6697 #ifdef INET
6698 		struct sockaddr_in6 *sin6;
6699 
6700 #endif
6701 
6702 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6703 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6704 			*error = EINVAL;
6705 			return;
6706 		}
6707 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6708 			/* can only bind v6 on PF_INET6 sockets */
6709 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6710 			*error = EINVAL;
6711 			return;
6712 		}
6713 #ifdef INET
6714 		sin6 = (struct sockaddr_in6 *)addr_touse;
6715 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6716 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6717 			    SCTP_IPV6_V6ONLY(inp)) {
6718 				/* can't bind mapped-v4 on PF_INET sockets */
6719 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6720 				*error = EINVAL;
6721 				return;
6722 			}
6723 			in6_sin6_2_sin(&sin, sin6);
6724 			addr_touse = (struct sockaddr *)&sin;
6725 		}
6726 #endif
6727 	}
6728 #endif
6729 #ifdef INET
6730 	if (sa->sa_family == AF_INET) {
6731 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6732 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6733 			*error = EINVAL;
6734 			return;
6735 		}
6736 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6737 		    SCTP_IPV6_V6ONLY(inp)) {
6738 			/* can't bind v4 on PF_INET sockets */
6739 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6740 			*error = EINVAL;
6741 			return;
6742 		}
6743 	}
6744 #endif
6745 	/*
6746 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6747 	 * below is ever changed we may need to lock before calling
6748 	 * association level binding.
6749 	 */
6750 	if (assoc_id == 0) {
6751 		/* delete the address */
6752 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6753 		    SCTP_DEL_IP_ADDRESS,
6754 		    vrf_id, NULL);
6755 	} else {
6756 		/*
6757 		 * FIX: decide whether we allow assoc based bindx
6758 		 */
6759 	}
6760 }
6761 
6762 /*
6763  * returns the valid local address count for an assoc, taking into account
6764  * all scoping rules
6765  */
6766 int
6767 sctp_local_addr_count(struct sctp_tcb *stcb)
6768 {
6769 	int loopback_scope;
6770 
6771 #if defined(INET)
6772 	int ipv4_local_scope, ipv4_addr_legal;
6773 
6774 #endif
6775 #if defined (INET6)
6776 	int local_scope, site_scope, ipv6_addr_legal;
6777 
6778 #endif
6779 	struct sctp_vrf *vrf;
6780 	struct sctp_ifn *sctp_ifn;
6781 	struct sctp_ifa *sctp_ifa;
6782 	int count = 0;
6783 
6784 	/* Turn on all the appropriate scopes */
6785 	loopback_scope = stcb->asoc.scope.loopback_scope;
6786 #if defined(INET)
6787 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6788 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6789 #endif
6790 #if defined(INET6)
6791 	local_scope = stcb->asoc.scope.local_scope;
6792 	site_scope = stcb->asoc.scope.site_scope;
6793 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6794 #endif
6795 	SCTP_IPI_ADDR_RLOCK();
6796 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6797 	if (vrf == NULL) {
6798 		/* no vrf, no addresses */
6799 		SCTP_IPI_ADDR_RUNLOCK();
6800 		return (0);
6801 	}
6802 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6803 		/*
6804 		 * bound all case: go through all ifns on the vrf
6805 		 */
6806 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6807 			if ((loopback_scope == 0) &&
6808 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6809 				continue;
6810 			}
6811 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6812 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6813 					continue;
6814 				switch (sctp_ifa->address.sa.sa_family) {
6815 #ifdef INET
6816 				case AF_INET:
6817 					if (ipv4_addr_legal) {
6818 						struct sockaddr_in *sin;
6819 
6820 						sin = &sctp_ifa->address.sin;
6821 						if (sin->sin_addr.s_addr == 0) {
6822 							/*
6823 							 * skip unspecified
6824 							 * addrs
6825 							 */
6826 							continue;
6827 						}
6828 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6829 						    &sin->sin_addr) != 0) {
6830 							continue;
6831 						}
6832 						if ((ipv4_local_scope == 0) &&
6833 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6834 							continue;
6835 						}
6836 						/* count this one */
6837 						count++;
6838 					} else {
6839 						continue;
6840 					}
6841 					break;
6842 #endif
6843 #ifdef INET6
6844 				case AF_INET6:
6845 					if (ipv6_addr_legal) {
6846 						struct sockaddr_in6 *sin6;
6847 
6848 						sin6 = &sctp_ifa->address.sin6;
6849 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6850 							continue;
6851 						}
6852 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6853 						    &sin6->sin6_addr) != 0) {
6854 							continue;
6855 						}
6856 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6857 							if (local_scope == 0)
6858 								continue;
6859 							if (sin6->sin6_scope_id == 0) {
6860 								if (sa6_recoverscope(sin6) != 0)
6861 									/*
6862 									 *
6863 									 * bad
6864 									 *
6865 									 * li
6866 									 * nk
6867 									 *
6868 									 * loc
6869 									 * al
6870 									 *
6871 									 * add
6872 									 * re
6873 									 * ss
6874 									 * */
6875 									continue;
6876 							}
6877 						}
6878 						if ((site_scope == 0) &&
6879 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6880 							continue;
6881 						}
6882 						/* count this one */
6883 						count++;
6884 					}
6885 					break;
6886 #endif
6887 				default:
6888 					/* TSNH */
6889 					break;
6890 				}
6891 			}
6892 		}
6893 	} else {
6894 		/*
6895 		 * subset bound case
6896 		 */
6897 		struct sctp_laddr *laddr;
6898 
6899 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6900 		    sctp_nxt_addr) {
6901 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6902 				continue;
6903 			}
6904 			/* count this one */
6905 			count++;
6906 		}
6907 	}
6908 	SCTP_IPI_ADDR_RUNLOCK();
6909 	return (count);
6910 }
6911 
6912 #if defined(SCTP_LOCAL_TRACE_BUF)
6913 
6914 void
6915 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6916 {
6917 	uint32_t saveindex, newindex;
6918 
6919 	do {
6920 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6921 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6922 			newindex = 1;
6923 		} else {
6924 			newindex = saveindex + 1;
6925 		}
6926 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6927 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6928 		saveindex = 0;
6929 	}
6930 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6931 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6932 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6933 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6934 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6935 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6936 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6937 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6938 }
6939 
6940 #endif
6941 static void
6942 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6943     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6944 {
6945 	struct ip *iph;
6946 
6947 #ifdef INET6
6948 	struct ip6_hdr *ip6;
6949 
6950 #endif
6951 	struct mbuf *sp, *last;
6952 	struct udphdr *uhdr;
6953 	uint16_t port;
6954 
6955 	if ((m->m_flags & M_PKTHDR) == 0) {
6956 		/* Can't handle one that is not a pkt hdr */
6957 		goto out;
6958 	}
6959 	/* Pull the src port */
6960 	iph = mtod(m, struct ip *);
6961 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6962 	port = uhdr->uh_sport;
6963 	/*
6964 	 * Split out the mbuf chain. Leave the IP header in m, place the
6965 	 * rest in the sp.
6966 	 */
6967 	sp = m_split(m, off, M_NOWAIT);
6968 	if (sp == NULL) {
6969 		/* Gak, drop packet, we can't do a split */
6970 		goto out;
6971 	}
6972 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6973 		/* Gak, packet can't have an SCTP header in it - too small */
6974 		m_freem(sp);
6975 		goto out;
6976 	}
6977 	/* Now pull up the UDP header and SCTP header together */
6978 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6979 	if (sp == NULL) {
6980 		/* Gak pullup failed */
6981 		goto out;
6982 	}
6983 	/* Trim out the UDP header */
6984 	m_adj(sp, sizeof(struct udphdr));
6985 
6986 	/* Now reconstruct the mbuf chain */
6987 	for (last = m; last->m_next; last = last->m_next);
6988 	last->m_next = sp;
6989 	m->m_pkthdr.len += sp->m_pkthdr.len;
6990 	/*
6991 	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
6992 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
6993 	 * CSUM_SCTP_VALID this would imply that the HW also verified the
6994 	 * SCTP checksum. Therefore, clear the bit.
6995 	 */
6996 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6997 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6998 	    m->m_pkthdr.len,
6999 	    if_name(m->m_pkthdr.rcvif),
7000 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
7001 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
7002 	iph = mtod(m, struct ip *);
7003 	switch (iph->ip_v) {
7004 #ifdef INET
7005 	case IPVERSION:
7006 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
7007 		sctp_input_with_port(m, off, port);
7008 		break;
7009 #endif
7010 #ifdef INET6
7011 	case IPV6_VERSION >> 4:
7012 		ip6 = mtod(m, struct ip6_hdr *);
7013 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
7014 		sctp6_input_with_port(&m, &off, port);
7015 		break;
7016 #endif
7017 	default:
7018 		goto out;
7019 		break;
7020 	}
7021 	return;
7022 out:
7023 	m_freem(m);
7024 }
7025 
7026 void
7027 sctp_over_udp_stop(void)
7028 {
7029 	/*
7030 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7031 	 * for writting!
7032 	 */
7033 #ifdef INET
7034 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7035 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7036 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7037 	}
7038 #endif
7039 #ifdef INET6
7040 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7041 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7042 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7043 	}
7044 #endif
7045 }
7046 
7047 int
7048 sctp_over_udp_start(void)
7049 {
7050 	uint16_t port;
7051 	int ret;
7052 
7053 #ifdef INET
7054 	struct sockaddr_in sin;
7055 
7056 #endif
7057 #ifdef INET6
7058 	struct sockaddr_in6 sin6;
7059 
7060 #endif
7061 	/*
7062 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7063 	 * for writting!
7064 	 */
7065 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7066 	if (ntohs(port) == 0) {
7067 		/* Must have a port set */
7068 		return (EINVAL);
7069 	}
7070 #ifdef INET
7071 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7072 		/* Already running -- must stop first */
7073 		return (EALREADY);
7074 	}
7075 #endif
7076 #ifdef INET6
7077 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7078 		/* Already running -- must stop first */
7079 		return (EALREADY);
7080 	}
7081 #endif
7082 #ifdef INET
7083 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7084 	    SOCK_DGRAM, IPPROTO_UDP,
7085 	    curthread->td_ucred, curthread))) {
7086 		sctp_over_udp_stop();
7087 		return (ret);
7088 	}
7089 	/* Call the special UDP hook. */
7090 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7091 	    sctp_recv_udp_tunneled_packet, NULL))) {
7092 		sctp_over_udp_stop();
7093 		return (ret);
7094 	}
7095 	/* Ok, we have a socket, bind it to the port. */
7096 	memset(&sin, 0, sizeof(struct sockaddr_in));
7097 	sin.sin_len = sizeof(struct sockaddr_in);
7098 	sin.sin_family = AF_INET;
7099 	sin.sin_port = htons(port);
7100 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7101 	    (struct sockaddr *)&sin, curthread))) {
7102 		sctp_over_udp_stop();
7103 		return (ret);
7104 	}
7105 #endif
7106 #ifdef INET6
7107 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7108 	    SOCK_DGRAM, IPPROTO_UDP,
7109 	    curthread->td_ucred, curthread))) {
7110 		sctp_over_udp_stop();
7111 		return (ret);
7112 	}
7113 	/* Call the special UDP hook. */
7114 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7115 	    sctp_recv_udp_tunneled_packet, NULL))) {
7116 		sctp_over_udp_stop();
7117 		return (ret);
7118 	}
7119 	/* Ok, we have a socket, bind it to the port. */
7120 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7121 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7122 	sin6.sin6_family = AF_INET6;
7123 	sin6.sin6_port = htons(port);
7124 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7125 	    (struct sockaddr *)&sin6, curthread))) {
7126 		sctp_over_udp_stop();
7127 		return (ret);
7128 	}
7129 #endif
7130 	return (0);
7131 }
7132