xref: /freebsd/sys/netinet/sctputil.c (revision a02aba5f3c73d7ed377f88327fedd11f70f23353)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *   this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *   the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #endif
45 #include <netinet/sctp_header.h>
46 #include <netinet/sctp_output.h>
47 #include <netinet/sctp_uio.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
50 #include <netinet/sctp_auth.h>
51 #include <netinet/sctp_asconf.h>
52 #include <netinet/sctp_bsd_addr.h>
53 
54 
55 #ifndef KTR_SCTP
56 #define KTR_SCTP KTR_SUBSYS
57 #endif
58 
59 extern struct sctp_cc_functions sctp_cc_functions[];
60 extern struct sctp_ss_functions sctp_ss_functions[];
61 
62 void
63 sctp_sblog(struct sockbuf *sb,
64     struct sctp_tcb *stcb, int from, int incr)
65 {
66 	struct sctp_cwnd_log sctp_clog;
67 
68 	sctp_clog.x.sb.stcb = stcb;
69 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
70 	if (stcb)
71 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
72 	else
73 		sctp_clog.x.sb.stcb_sbcc = 0;
74 	sctp_clog.x.sb.incr = incr;
75 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
76 	    SCTP_LOG_EVENT_SB,
77 	    from,
78 	    sctp_clog.x.misc.log1,
79 	    sctp_clog.x.misc.log2,
80 	    sctp_clog.x.misc.log3,
81 	    sctp_clog.x.misc.log4);
82 }
83 
84 void
85 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
86 {
87 	struct sctp_cwnd_log sctp_clog;
88 
89 	sctp_clog.x.close.inp = (void *)inp;
90 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
91 	if (stcb) {
92 		sctp_clog.x.close.stcb = (void *)stcb;
93 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
94 	} else {
95 		sctp_clog.x.close.stcb = 0;
96 		sctp_clog.x.close.state = 0;
97 	}
98 	sctp_clog.x.close.loc = loc;
99 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
100 	    SCTP_LOG_EVENT_CLOSE,
101 	    0,
102 	    sctp_clog.x.misc.log1,
103 	    sctp_clog.x.misc.log2,
104 	    sctp_clog.x.misc.log3,
105 	    sctp_clog.x.misc.log4);
106 }
107 
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 
125 }
126 
127 void
128 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
129 {
130 	struct sctp_cwnd_log sctp_clog;
131 
132 	sctp_clog.x.strlog.stcb = stcb;
133 	sctp_clog.x.strlog.n_tsn = tsn;
134 	sctp_clog.x.strlog.n_sseq = sseq;
135 	sctp_clog.x.strlog.e_tsn = 0;
136 	sctp_clog.x.strlog.e_sseq = 0;
137 	sctp_clog.x.strlog.strm = stream;
138 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
139 	    SCTP_LOG_EVENT_STRM,
140 	    from,
141 	    sctp_clog.x.misc.log1,
142 	    sctp_clog.x.misc.log2,
143 	    sctp_clog.x.misc.log3,
144 	    sctp_clog.x.misc.log4);
145 
146 }
147 
148 void
149 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
150 {
151 	struct sctp_cwnd_log sctp_clog;
152 
153 	sctp_clog.x.nagle.stcb = (void *)stcb;
154 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
155 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
156 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
157 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
158 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
159 	    SCTP_LOG_EVENT_NAGLE,
160 	    action,
161 	    sctp_clog.x.misc.log1,
162 	    sctp_clog.x.misc.log2,
163 	    sctp_clog.x.misc.log3,
164 	    sctp_clog.x.misc.log4);
165 }
166 
167 
168 void
169 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
170 {
171 	struct sctp_cwnd_log sctp_clog;
172 
173 	sctp_clog.x.sack.cumack = cumack;
174 	sctp_clog.x.sack.oldcumack = old_cumack;
175 	sctp_clog.x.sack.tsn = tsn;
176 	sctp_clog.x.sack.numGaps = gaps;
177 	sctp_clog.x.sack.numDups = dups;
178 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
179 	    SCTP_LOG_EVENT_SACK,
180 	    from,
181 	    sctp_clog.x.misc.log1,
182 	    sctp_clog.x.misc.log2,
183 	    sctp_clog.x.misc.log3,
184 	    sctp_clog.x.misc.log4);
185 }
186 
187 void
188 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
189 {
190 	struct sctp_cwnd_log sctp_clog;
191 
192 	memset(&sctp_clog, 0, sizeof(sctp_clog));
193 	sctp_clog.x.map.base = map;
194 	sctp_clog.x.map.cum = cum;
195 	sctp_clog.x.map.high = high;
196 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
197 	    SCTP_LOG_EVENT_MAP,
198 	    from,
199 	    sctp_clog.x.misc.log1,
200 	    sctp_clog.x.misc.log2,
201 	    sctp_clog.x.misc.log3,
202 	    sctp_clog.x.misc.log4);
203 }
204 
205 void
206 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
207     int from)
208 {
209 	struct sctp_cwnd_log sctp_clog;
210 
211 	memset(&sctp_clog, 0, sizeof(sctp_clog));
212 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
213 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
214 	sctp_clog.x.fr.tsn = tsn;
215 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
216 	    SCTP_LOG_EVENT_FR,
217 	    from,
218 	    sctp_clog.x.misc.log1,
219 	    sctp_clog.x.misc.log2,
220 	    sctp_clog.x.misc.log3,
221 	    sctp_clog.x.misc.log4);
222 
223 }
224 
225 
226 void
227 sctp_log_mb(struct mbuf *m, int from)
228 {
229 	struct sctp_cwnd_log sctp_clog;
230 
231 	sctp_clog.x.mb.mp = m;
232 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
233 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
234 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
235 	if (SCTP_BUF_IS_EXTENDED(m)) {
236 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
237 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
238 	} else {
239 		sctp_clog.x.mb.ext = 0;
240 		sctp_clog.x.mb.refcnt = 0;
241 	}
242 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
243 	    SCTP_LOG_EVENT_MBUF,
244 	    from,
245 	    sctp_clog.x.misc.log1,
246 	    sctp_clog.x.misc.log2,
247 	    sctp_clog.x.misc.log3,
248 	    sctp_clog.x.misc.log4);
249 }
250 
251 
252 void
253 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
254     int from)
255 {
256 	struct sctp_cwnd_log sctp_clog;
257 
258 	if (control == NULL) {
259 		SCTP_PRINTF("Gak log of NULL?\n");
260 		return;
261 	}
262 	sctp_clog.x.strlog.stcb = control->stcb;
263 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
264 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
265 	sctp_clog.x.strlog.strm = control->sinfo_stream;
266 	if (poschk != NULL) {
267 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
268 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
269 	} else {
270 		sctp_clog.x.strlog.e_tsn = 0;
271 		sctp_clog.x.strlog.e_sseq = 0;
272 	}
273 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
274 	    SCTP_LOG_EVENT_STRM,
275 	    from,
276 	    sctp_clog.x.misc.log1,
277 	    sctp_clog.x.misc.log2,
278 	    sctp_clog.x.misc.log3,
279 	    sctp_clog.x.misc.log4);
280 
281 }
282 
283 void
284 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
285 {
286 	struct sctp_cwnd_log sctp_clog;
287 
288 	sctp_clog.x.cwnd.net = net;
289 	if (stcb->asoc.send_queue_cnt > 255)
290 		sctp_clog.x.cwnd.cnt_in_send = 255;
291 	else
292 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
293 	if (stcb->asoc.stream_queue_cnt > 255)
294 		sctp_clog.x.cwnd.cnt_in_str = 255;
295 	else
296 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
297 
298 	if (net) {
299 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
300 		sctp_clog.x.cwnd.inflight = net->flight_size;
301 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
302 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
303 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
304 	}
305 	if (SCTP_CWNDLOG_PRESEND == from) {
306 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
307 	}
308 	sctp_clog.x.cwnd.cwnd_augment = augment;
309 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
310 	    SCTP_LOG_EVENT_CWND,
311 	    from,
312 	    sctp_clog.x.misc.log1,
313 	    sctp_clog.x.misc.log2,
314 	    sctp_clog.x.misc.log3,
315 	    sctp_clog.x.misc.log4);
316 
317 }
318 
319 void
320 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
321 {
322 	struct sctp_cwnd_log sctp_clog;
323 
324 	memset(&sctp_clog, 0, sizeof(sctp_clog));
325 	if (inp) {
326 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
327 
328 	} else {
329 		sctp_clog.x.lock.sock = (void *)NULL;
330 	}
331 	sctp_clog.x.lock.inp = (void *)inp;
332 	if (stcb) {
333 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
334 	} else {
335 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
336 	}
337 	if (inp) {
338 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
339 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
340 	} else {
341 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
342 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
343 	}
344 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
345 	if (inp && (inp->sctp_socket)) {
346 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
347 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
348 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
349 	} else {
350 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
351 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
352 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
353 	}
354 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
355 	    SCTP_LOG_LOCK_EVENT,
356 	    from,
357 	    sctp_clog.x.misc.log1,
358 	    sctp_clog.x.misc.log2,
359 	    sctp_clog.x.misc.log3,
360 	    sctp_clog.x.misc.log4);
361 
362 }
363 
364 void
365 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
366 {
367 	struct sctp_cwnd_log sctp_clog;
368 
369 	memset(&sctp_clog, 0, sizeof(sctp_clog));
370 	sctp_clog.x.cwnd.net = net;
371 	sctp_clog.x.cwnd.cwnd_new_value = error;
372 	sctp_clog.x.cwnd.inflight = net->flight_size;
373 	sctp_clog.x.cwnd.cwnd_augment = burst;
374 	if (stcb->asoc.send_queue_cnt > 255)
375 		sctp_clog.x.cwnd.cnt_in_send = 255;
376 	else
377 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
378 	if (stcb->asoc.stream_queue_cnt > 255)
379 		sctp_clog.x.cwnd.cnt_in_str = 255;
380 	else
381 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
382 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
383 	    SCTP_LOG_EVENT_MAXBURST,
384 	    from,
385 	    sctp_clog.x.misc.log1,
386 	    sctp_clog.x.misc.log2,
387 	    sctp_clog.x.misc.log3,
388 	    sctp_clog.x.misc.log4);
389 
390 }
391 
392 void
393 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
394 {
395 	struct sctp_cwnd_log sctp_clog;
396 
397 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
398 	sctp_clog.x.rwnd.send_size = snd_size;
399 	sctp_clog.x.rwnd.overhead = overhead;
400 	sctp_clog.x.rwnd.new_rwnd = 0;
401 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
402 	    SCTP_LOG_EVENT_RWND,
403 	    from,
404 	    sctp_clog.x.misc.log1,
405 	    sctp_clog.x.misc.log2,
406 	    sctp_clog.x.misc.log3,
407 	    sctp_clog.x.misc.log4);
408 }
409 
410 void
411 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
412 {
413 	struct sctp_cwnd_log sctp_clog;
414 
415 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
416 	sctp_clog.x.rwnd.send_size = flight_size;
417 	sctp_clog.x.rwnd.overhead = overhead;
418 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
419 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
420 	    SCTP_LOG_EVENT_RWND,
421 	    from,
422 	    sctp_clog.x.misc.log1,
423 	    sctp_clog.x.misc.log2,
424 	    sctp_clog.x.misc.log3,
425 	    sctp_clog.x.misc.log4);
426 }
427 
428 void
429 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
430 {
431 	struct sctp_cwnd_log sctp_clog;
432 
433 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
434 	sctp_clog.x.mbcnt.size_change = book;
435 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
436 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
437 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
438 	    SCTP_LOG_EVENT_MBCNT,
439 	    from,
440 	    sctp_clog.x.misc.log1,
441 	    sctp_clog.x.misc.log2,
442 	    sctp_clog.x.misc.log3,
443 	    sctp_clog.x.misc.log4);
444 
445 }
446 
/*
 * Log four caller-supplied 32-bit values as a generic misc event; a thin
 * passthrough to the KTR trace macro with no interpretation of a..d.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
455 
456 void
457 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
458 {
459 	struct sctp_cwnd_log sctp_clog;
460 
461 	sctp_clog.x.wake.stcb = (void *)stcb;
462 	sctp_clog.x.wake.wake_cnt = wake_cnt;
463 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
464 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
465 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
466 
467 	if (stcb->asoc.stream_queue_cnt < 0xff)
468 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
469 	else
470 		sctp_clog.x.wake.stream_qcnt = 0xff;
471 
472 	if (stcb->asoc.chunks_on_out_queue < 0xff)
473 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
474 	else
475 		sctp_clog.x.wake.chunks_on_oque = 0xff;
476 
477 	sctp_clog.x.wake.sctpflags = 0;
478 	/* set in the defered mode stuff */
479 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
480 		sctp_clog.x.wake.sctpflags |= 1;
481 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
482 		sctp_clog.x.wake.sctpflags |= 2;
483 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
484 		sctp_clog.x.wake.sctpflags |= 4;
485 	/* what about the sb */
486 	if (stcb->sctp_socket) {
487 		struct socket *so = stcb->sctp_socket;
488 
489 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
490 	} else {
491 		sctp_clog.x.wake.sbflags = 0xff;
492 	}
493 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
494 	    SCTP_LOG_EVENT_WAKE,
495 	    from,
496 	    sctp_clog.x.misc.log1,
497 	    sctp_clog.x.misc.log2,
498 	    sctp_clog.x.misc.log3,
499 	    sctp_clog.x.misc.log4);
500 
501 }
502 
503 void
504 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
505 {
506 	struct sctp_cwnd_log sctp_clog;
507 
508 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
509 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
510 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
511 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
512 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
513 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
514 	sctp_clog.x.blk.sndlen = sendlen;
515 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
516 	    SCTP_LOG_EVENT_BLOCK,
517 	    from,
518 	    sctp_clog.x.misc.log1,
519 	    sctp_clog.x.misc.log2,
520 	    sctp_clog.x.misc.log3,
521 	    sctp_clog.x.misc.log4);
522 
523 }
524 
/*
 * Stub for the old stat-log copyout sysctl/sockopt path; log extraction is
 * expected to go through ktrdump instead, so this always succeeds without
 * touching optval/optsize.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
531 
532 #ifdef SCTP_AUDITING_ENABLED
533 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
534 static int sctp_audit_indx = 0;
535 
536 static
537 void
538 sctp_print_audit_report(void)
539 {
540 	int i;
541 	int cnt;
542 
543 	cnt = 0;
544 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
545 		if ((sctp_audit_data[i][0] == 0xe0) &&
546 		    (sctp_audit_data[i][1] == 0x01)) {
547 			cnt = 0;
548 			SCTP_PRINTF("\n");
549 		} else if (sctp_audit_data[i][0] == 0xf0) {
550 			cnt = 0;
551 			SCTP_PRINTF("\n");
552 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
553 		    (sctp_audit_data[i][1] == 0x01)) {
554 			SCTP_PRINTF("\n");
555 			cnt = 0;
556 		}
557 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
558 		    (uint32_t) sctp_audit_data[i][1]);
559 		cnt++;
560 		if ((cnt % 14) == 0)
561 			SCTP_PRINTF("\n");
562 	}
563 	for (i = 0; i < sctp_audit_indx; i++) {
564 		if ((sctp_audit_data[i][0] == 0xe0) &&
565 		    (sctp_audit_data[i][1] == 0x01)) {
566 			cnt = 0;
567 			SCTP_PRINTF("\n");
568 		} else if (sctp_audit_data[i][0] == 0xf0) {
569 			cnt = 0;
570 			SCTP_PRINTF("\n");
571 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
572 		    (sctp_audit_data[i][1] == 0x01)) {
573 			SCTP_PRINTF("\n");
574 			cnt = 0;
575 		}
576 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
577 		    (uint32_t) sctp_audit_data[i][1]);
578 		cnt++;
579 		if ((cnt % 14) == 0)
580 			SCTP_PRINTF("\n");
581 	}
582 	SCTP_PRINTF("\n");
583 }
584 
/*
 * Cross-check (and correct) the association's flight-size accounting
 * against the actual sent queue, recording each step in the audit ring.
 * Verifies three invariants and repairs each on mismatch:
 *   1. sent_queue_retran_cnt equals the number of RESEND-marked chunks;
 *   2. total_flight equals the sum of book_size over in-flight chunks;
 *   3. total_flight_count equals the number of in-flight chunks, and the
 *      per-net flight_size values sum to total_flight.
 * Any discrepancy is printed and triggers a full audit-ring dump.
 * inp/stcb may be NULL; that is logged (0xAF marker) and the check skipped.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA <from> marks entry into the auditor in the ring. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF 0x01: called without an endpoint; nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF 0x02: called without an association; nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the retransmit count as seen before the audit. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recount RESEND-marked vs in-flight chunks on the sent queue. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF 0xA1: retran count mismatch; repair and log 0xA2. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF 0xA2: total_flight disagrees with booked bytes. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF 0xA5: flight chunk count disagrees with recount. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Now check the per-destination flight sizes against the total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF 0xA3: per-net sums disagree; rebuild each net's
		 * flight_size from the chunks actually addressed to it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		/* Any correction made: dump the whole audit ring. */
		sctp_print_audit_report();
	}
}
714 
715 void
716 sctp_audit_log(uint8_t ev, uint8_t fd)
717 {
718 
719 	sctp_audit_data[sctp_audit_indx][0] = ev;
720 	sctp_audit_data[sctp_audit_indx][1] = fd;
721 	sctp_audit_indx++;
722 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
723 		sctp_audit_indx = 0;
724 	}
725 }
726 
727 #endif
728 
729 /*
730  * sctp_stop_timers_for_shutdown() should be called
731  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
732  * state to make sure that all timers are stopped.
733  */
734 void
735 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
736 {
737 	struct sctp_association *asoc;
738 	struct sctp_nets *net;
739 
740 	asoc = &stcb->asoc;
741 
742 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
743 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
744 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
745 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
746 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
747 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
748 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
749 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
750 	}
751 }
752 
/*
 * a list of sizes based on typical mtu's, used only if next hop size not
 * returned. Must be kept in ascending order.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Return the largest MTU smaller than val. If there is no
 * entry, just return val.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t idx;
	const uint32_t count = sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0]);

	/* No table entry is strictly below val: hand val back unchanged. */
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	/* Scan downward for the first entry strictly below val. */
	for (idx = count - 1; idx > 0; idx--) {
		if (sctp_mtu_sizes[idx] < val) {
			return (sctp_mtu_sizes[idx]);
		}
	}
	return (sctp_mtu_sizes[0]);
}

/*
 * Return the smallest MTU larger than val. If there is no
 * entry, just return val. (inp is accepted for interface
 * compatibility and not consulted.)
 */
uint32_t
sctp_get_next_mtu(struct sctp_inpcb *inp, uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t idx;
	const uint32_t count = sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0]);

	for (idx = 0; idx < count; idx++) {
		if (sctp_mtu_sizes[idx] > val) {
			return (sctp_mtu_sizes[idx]);
		}
	}
	return (val);
}
815 
/*
 * Refill the endpoint's random-number store by HMAC-ing the endpoint's
 * random seed with a monotonically increasing counter, then reset the
 * read offset to the start of the store.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
834 
/*
 * Return a 32-bit value drawn from the endpoint's random store, advancing
 * the store offset with a lock-free compare-and-set; the store is refilled
 * when the offset wraps. If initial_sequence_debug is non-zero, return a
 * deterministic incrementing sequence instead (debug aid).
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		/* Debug mode: hand out a predictable, incrementing value. */
		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* Wrap before the tail so a full uint32_t always fits. */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* Claim [store_at, store_at+4) atomically; retry if we raced. */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
872 
873 uint32_t
874 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
875 {
876 	uint32_t x, not_done;
877 	struct timeval now;
878 
879 	(void)SCTP_GETTIME_TIMEVAL(&now);
880 	not_done = 1;
881 	while (not_done) {
882 		x = sctp_select_initial_TSN(&inp->sctp_ep);
883 		if (x == 0) {
884 			/* we never use 0 */
885 			continue;
886 		}
887 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
888 			not_done = 0;
889 		}
890 	}
891 	return (x);
892 }
893 
894 int
895 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
896     uint32_t override_tag, uint32_t vrf_id)
897 {
898 	struct sctp_association *asoc;
899 
900 	/*
901 	 * Anything set to zero is taken care of by the allocation routine's
902 	 * bzero
903 	 */
904 
905 	/*
906 	 * Up front select what scoping to apply on addresses I tell my peer
907 	 * Not sure what to do with these right now, we will need to come up
908 	 * with a way to set them. We may need to pass them through from the
909 	 * caller in the sctp_aloc_assoc() function.
910 	 */
911 	int i;
912 
913 	asoc = &stcb->asoc;
914 	/* init all variables to a known value. */
915 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
916 	asoc->max_burst = m->sctp_ep.max_burst;
917 	asoc->fr_max_burst = m->sctp_ep.fr_max_burst;
918 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
919 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
920 	asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
921 	asoc->ecn_allowed = m->sctp_ecn_enable;
922 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
923 	asoc->sctp_cmt_pf = (uint8_t) 0;
924 	asoc->sctp_frag_point = m->sctp_frag_point;
925 	asoc->sctp_features = m->sctp_features;
926 #ifdef INET
927 	asoc->default_dscp = m->ip_inp.inp.inp_ip_tos;
928 #else
929 	asoc->default_dscp = 0;
930 #endif
931 
932 #ifdef INET6
933 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
934 #else
935 	asoc->default_flowlabel = 0;
936 #endif
937 	asoc->sb_send_resv = 0;
938 	if (override_tag) {
939 		asoc->my_vtag = override_tag;
940 	} else {
941 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
942 	}
943 	/* Get the nonce tags */
944 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
945 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
946 	asoc->vrf_id = vrf_id;
947 
948 #ifdef SCTP_ASOCLOG_OF_TSNS
949 	asoc->tsn_in_at = 0;
950 	asoc->tsn_out_at = 0;
951 	asoc->tsn_in_wrapped = 0;
952 	asoc->tsn_out_wrapped = 0;
953 	asoc->cumack_log_at = 0;
954 	asoc->cumack_log_atsnt = 0;
955 #endif
956 #ifdef SCTP_FS_SPEC_LOG
957 	asoc->fs_index = 0;
958 #endif
959 	asoc->refcnt = 0;
960 	asoc->assoc_up_sent = 0;
961 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
962 	    sctp_select_initial_TSN(&m->sctp_ep);
963 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
964 	/* we are optimisitic here */
965 	asoc->peer_supports_pktdrop = 1;
966 	asoc->peer_supports_nat = 0;
967 	asoc->sent_queue_retran_cnt = 0;
968 
969 	/* for CMT */
970 	asoc->last_net_cmt_send_started = NULL;
971 
972 	/* This will need to be adjusted */
973 	asoc->last_acked_seq = asoc->init_seq_number - 1;
974 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
975 	asoc->asconf_seq_in = asoc->last_acked_seq;
976 
977 	/* here we are different, we hold the next one we expect */
978 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
979 
980 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
981 	asoc->initial_rto = m->sctp_ep.initial_rto;
982 
983 	asoc->max_init_times = m->sctp_ep.max_init_times;
984 	asoc->max_send_times = m->sctp_ep.max_send_times;
985 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
986 	asoc->def_net_pf_threshold = m->sctp_ep.def_net_pf_threshold;
987 	asoc->free_chunk_cnt = 0;
988 
989 	asoc->iam_blocking = 0;
990 
991 	asoc->context = m->sctp_context;
992 	asoc->def_send = m->def_send;
993 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
994 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
995 	asoc->pr_sctp_cnt = 0;
996 	asoc->total_output_queue_size = 0;
997 
998 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
999 		struct in6pcb *inp6;
1000 
1001 		/* Its a V6 socket */
1002 		inp6 = (struct in6pcb *)m;
1003 		asoc->ipv6_addr_legal = 1;
1004 		/* Now look at the binding flag to see if V4 will be legal */
1005 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1006 			asoc->ipv4_addr_legal = 1;
1007 		} else {
1008 			/* V4 addresses are NOT legal on the association */
1009 			asoc->ipv4_addr_legal = 0;
1010 		}
1011 	} else {
1012 		/* Its a V4 socket, no - V6 */
1013 		asoc->ipv4_addr_legal = 1;
1014 		asoc->ipv6_addr_legal = 0;
1015 	}
1016 
1017 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1018 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1019 
1020 	asoc->smallest_mtu = m->sctp_frag_point;
1021 	asoc->minrto = m->sctp_ep.sctp_minrto;
1022 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1023 
1024 	asoc->locked_on_sending = NULL;
1025 	asoc->stream_locked_on = 0;
1026 	asoc->ecn_echo_cnt_onq = 0;
1027 	asoc->stream_locked = 0;
1028 
1029 	asoc->send_sack = 1;
1030 
1031 	LIST_INIT(&asoc->sctp_restricted_addrs);
1032 
1033 	TAILQ_INIT(&asoc->nets);
1034 	TAILQ_INIT(&asoc->pending_reply_queue);
1035 	TAILQ_INIT(&asoc->asconf_ack_sent);
1036 	/* Setup to fill the hb random cache at first HB */
1037 	asoc->hb_random_idx = 4;
1038 
1039 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1040 
1041 	stcb->asoc.congestion_control_module = m->sctp_ep.sctp_default_cc_module;
1042 	stcb->asoc.cc_functions = sctp_cc_functions[m->sctp_ep.sctp_default_cc_module];
1043 
1044 	stcb->asoc.stream_scheduling_module = m->sctp_ep.sctp_default_ss_module;
1045 	stcb->asoc.ss_functions = sctp_ss_functions[m->sctp_ep.sctp_default_ss_module];
1046 
1047 	/*
1048 	 * Now the stream parameters, here we allocate space for all streams
1049 	 * that we request by default.
1050 	 */
1051 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1052 	    m->sctp_ep.pre_open_stream_count;
1053 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1054 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1055 	    SCTP_M_STRMO);
1056 	if (asoc->strmout == NULL) {
1057 		/* big trouble no memory */
1058 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1059 		return (ENOMEM);
1060 	}
1061 	for (i = 0; i < asoc->streamoutcnt; i++) {
1062 		/*
1063 		 * inbound side must be set to 0xffff, also NOTE when we get
1064 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1065 		 * count (streamoutcnt) but first check if we sent to any of
1066 		 * the upper streams that were dropped (if some were). Those
1067 		 * that were dropped must be notified to the upper layer as
1068 		 * failed to send.
1069 		 */
1070 		asoc->strmout[i].next_sequence_sent = 0x0;
1071 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1072 		asoc->strmout[i].stream_no = i;
1073 		asoc->strmout[i].last_msg_incomplete = 0;
1074 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1075 	}
1076 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1077 
1078 	/* Now the mapping array */
1079 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1080 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1081 	    SCTP_M_MAP);
1082 	if (asoc->mapping_array == NULL) {
1083 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1084 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1085 		return (ENOMEM);
1086 	}
1087 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1088 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1089 	    SCTP_M_MAP);
1090 	if (asoc->nr_mapping_array == NULL) {
1091 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1092 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1093 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1094 		return (ENOMEM);
1095 	}
1096 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1097 
1098 	/* Now the init of the other outqueues */
1099 	TAILQ_INIT(&asoc->free_chunks);
1100 	TAILQ_INIT(&asoc->control_send_queue);
1101 	TAILQ_INIT(&asoc->asconf_send_queue);
1102 	TAILQ_INIT(&asoc->send_queue);
1103 	TAILQ_INIT(&asoc->sent_queue);
1104 	TAILQ_INIT(&asoc->reasmqueue);
1105 	TAILQ_INIT(&asoc->resetHead);
1106 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1107 	TAILQ_INIT(&asoc->asconf_queue);
1108 	/* authentication fields */
1109 	asoc->authinfo.random = NULL;
1110 	asoc->authinfo.active_keyid = 0;
1111 	asoc->authinfo.assoc_key = NULL;
1112 	asoc->authinfo.assoc_keyid = 0;
1113 	asoc->authinfo.recv_key = NULL;
1114 	asoc->authinfo.recv_keyid = 0;
1115 	LIST_INIT(&asoc->shared_keys);
1116 	asoc->marked_retrans = 0;
1117 	asoc->timoinit = 0;
1118 	asoc->timodata = 0;
1119 	asoc->timosack = 0;
1120 	asoc->timoshutdown = 0;
1121 	asoc->timoheartbeat = 0;
1122 	asoc->timocookie = 0;
1123 	asoc->timoshutdownack = 0;
1124 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1125 	asoc->discontinuity_time = asoc->start_time;
1126 	/*
1127 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1128 	 * freed later when the association is freed.
1129 	 */
1130 	return (0);
1131 }
1132 
1133 void
1134 sctp_print_mapping_array(struct sctp_association *asoc)
1135 {
1136 	unsigned int i, limit;
1137 
1138 	printf("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1139 	    asoc->mapping_array_size,
1140 	    asoc->mapping_array_base_tsn,
1141 	    asoc->cumulative_tsn,
1142 	    asoc->highest_tsn_inside_map,
1143 	    asoc->highest_tsn_inside_nr_map);
1144 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1145 		if (asoc->mapping_array[limit - 1]) {
1146 			break;
1147 		}
1148 	}
1149 	printf("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1150 	for (i = 0; i < limit; i++) {
1151 		printf("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1152 	}
1153 	if (limit % 16)
1154 		printf("\n");
1155 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1156 		if (asoc->nr_mapping_array[limit - 1]) {
1157 			break;
1158 		}
1159 	}
1160 	printf("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1161 	for (i = 0; i < limit; i++) {
1162 		printf("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1163 	}
1164 	if (limit % 16)
1165 		printf("\n");
1166 }
1167 
1168 int
1169 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1170 {
1171 	/* mapping array needs to grow */
1172 	uint8_t *new_array1, *new_array2;
1173 	uint32_t new_size;
1174 
1175 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1176 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1177 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1178 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1179 		/* can't get more, forget it */
1180 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1181 		if (new_array1) {
1182 			SCTP_FREE(new_array1, SCTP_M_MAP);
1183 		}
1184 		if (new_array2) {
1185 			SCTP_FREE(new_array2, SCTP_M_MAP);
1186 		}
1187 		return (-1);
1188 	}
1189 	memset(new_array1, 0, new_size);
1190 	memset(new_array2, 0, new_size);
1191 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1192 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1193 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1194 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1195 	asoc->mapping_array = new_array1;
1196 	asoc->nr_mapping_array = new_array2;
1197 	asoc->mapping_array_size = new_size;
1198 	return (0);
1199 }
1200 
1201 
/*
 * Core of the association iterator: walk every endpoint (or just one,
 * when SCTP_ITERATOR_DO_SINGLE_INP is set) and every association on
 * each, invoking the caller-supplied callbacks (function_inp,
 * function_assoc, function_inp_end, function_atend).  The iterator
 * structure "it" is freed here before returning.
 *
 * Locking: runs with the INP-INFO read lock and the global iterator
 * lock held.  After SCTP_ITERATOR_MAX_AT_ONCE associations it briefly
 * drops and re-acquires both locks (holding inp/stcb references across
 * the gap) so that other threads can make progress, then re-checks
 * sctp_it_ctl.iterator_flags for stop/exit requests.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/* Drop the ref taken when the iterator was queued. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* the first endpoint was already read-locked above */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	/* Skip endpoints whose flags/features don't match the filter. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* per-endpoint callback asked to skip, or no assocs */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/*
			 * While the locks were dropped someone may have
			 * posted a control request; honour it before
			 * touching inp/stcb again.
			 */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_MUST_EXIT) {
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				printf("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1353 
/*
 * Drain the global queue of pending iterators, running each one via
 * sctp_iterator_work() (which frees the iterator).  The iterator WQ
 * lock is dropped around each run and re-taken afterwards, so new
 * iterators may be queued while one is executing.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		sctp_it_ctl.cur_it = it;
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		/* sctp_iterator_work() frees "it" before returning */
		sctp_iterator_work(it);
		sctp_it_ctl.cur_it = NULL;
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
			/* shutdown requested; leave remaining iterators queued */
			break;
		}
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
1380 
1381 
1382 static void
1383 sctp_handle_addr_wq(void)
1384 {
1385 	/* deal with the ADDR wq from the rtsock calls */
1386 	struct sctp_laddr *wi, *nwi;
1387 	struct sctp_asconf_iterator *asc;
1388 
1389 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1390 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1391 	if (asc == NULL) {
1392 		/* Try later, no memory */
1393 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1394 		    (struct sctp_inpcb *)NULL,
1395 		    (struct sctp_tcb *)NULL,
1396 		    (struct sctp_nets *)NULL);
1397 		return;
1398 	}
1399 	LIST_INIT(&asc->list_of_work);
1400 	asc->cnt = 0;
1401 
1402 	SCTP_WQ_ADDR_LOCK();
1403 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1404 		LIST_REMOVE(wi, sctp_nxt_addr);
1405 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1406 		asc->cnt++;
1407 	}
1408 	SCTP_WQ_ADDR_UNLOCK();
1409 
1410 	if (asc->cnt == 0) {
1411 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1412 	} else {
1413 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1414 		    sctp_asconf_iterator_stcb,
1415 		    NULL,	/* No ep end for boundall */
1416 		    SCTP_PCB_FLAGS_BOUNDALL,
1417 		    SCTP_PCB_ANY_FEATURES,
1418 		    SCTP_ASOC_ANY_STATE,
1419 		    (void *)asc, 0,
1420 		    sctp_asconf_iterator_end, NULL, 0);
1421 	}
1422 }
1423 
/*
 * NOTE(review): file-scope scratch variables written only by the
 * SCTP_TIMER_TYPE_SEND case of sctp_timeout_handler() below.  They are
 * neither static nor protected by any lock, so concurrent timer
 * expirations race on them; they look like leftover debugging aids —
 * consider making them locals of the handler (confirm nothing else in
 * the kernel references them by name before changing linkage).
 */
int retcode = 0;
int cur_oerr = 0;
1426 
/*
 * Common expiry handler for every SCTP timer type; "t" points at the
 * struct sctp_timer embedded in the owning object (endpoint, tcb, or
 * net).  The handler first validates the timer (tmr->self back-pointer,
 * valid type, required inp/stcb present and alive), taking an inp
 * reference and a temporary stcb refcount while doing so, then
 * re-checks that the callout was not rescheduled or stopped before
 * dispatching on tmr->type.  tmr->stopped_from is a breadcrumb noting
 * how far the handler got (0xa001..0xa006) or which type was serviced.
 * Cases that destroy the tcb/inp jump to out_decr/out_no_decr so the
 * unlock/deref epilogue matches what still exists.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	/* assume we will send something; cases that don't clear this */
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* every type except ADDR_WQ needs an endpoint */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * If the socket is gone, only the timer types listed
		 * below are still allowed to run on the dying endpoint.
		 */
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* hold the assoc while we validate it */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* timer was stopped while we were getting here */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* lock the tcb, then release the temporary refcount */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed-SACK timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			/* re-arm HB on this destination and push output */
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/* COOKIE-ECHO retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* rotate the endpoint's secret keys for cookie signing */
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* shutdown took too long: abort the association */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1869 
1870 void
1871 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1872     struct sctp_nets *net)
1873 {
1874 	uint32_t to_ticks;
1875 	struct sctp_timer *tmr;
1876 
1877 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1878 		return;
1879 
1880 	to_ticks = 0;
1881 
1882 	tmr = NULL;
1883 	if (stcb) {
1884 		SCTP_TCB_LOCK_ASSERT(stcb);
1885 	}
1886 	switch (t_type) {
1887 	case SCTP_TIMER_TYPE_ZERO_COPY:
1888 		tmr = &inp->sctp_ep.zero_copy_timer;
1889 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1890 		break;
1891 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1892 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1893 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1894 		break;
1895 	case SCTP_TIMER_TYPE_ADDR_WQ:
1896 		/* Only 1 tick away :-) */
1897 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1898 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1899 		break;
1900 	case SCTP_TIMER_TYPE_SEND:
1901 		/* Here we use the RTO timer */
1902 		{
1903 			int rto_val;
1904 
1905 			if ((stcb == NULL) || (net == NULL)) {
1906 				return;
1907 			}
1908 			tmr = &net->rxt_timer;
1909 			if (net->RTO == 0) {
1910 				rto_val = stcb->asoc.initial_rto;
1911 			} else {
1912 				rto_val = net->RTO;
1913 			}
1914 			to_ticks = MSEC_TO_TICKS(rto_val);
1915 		}
1916 		break;
1917 	case SCTP_TIMER_TYPE_INIT:
1918 		/*
1919 		 * Here we use the INIT timer default usually about 1
1920 		 * minute.
1921 		 */
1922 		if ((stcb == NULL) || (net == NULL)) {
1923 			return;
1924 		}
1925 		tmr = &net->rxt_timer;
1926 		if (net->RTO == 0) {
1927 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1928 		} else {
1929 			to_ticks = MSEC_TO_TICKS(net->RTO);
1930 		}
1931 		break;
1932 	case SCTP_TIMER_TYPE_RECV:
1933 		/*
1934 		 * Here we use the Delayed-Ack timer value from the inp
1935 		 * ususually about 200ms.
1936 		 */
1937 		if (stcb == NULL) {
1938 			return;
1939 		}
1940 		tmr = &stcb->asoc.dack_timer;
1941 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1942 		break;
1943 	case SCTP_TIMER_TYPE_SHUTDOWN:
1944 		/* Here we use the RTO of the destination. */
1945 		if ((stcb == NULL) || (net == NULL)) {
1946 			return;
1947 		}
1948 		if (net->RTO == 0) {
1949 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1950 		} else {
1951 			to_ticks = MSEC_TO_TICKS(net->RTO);
1952 		}
1953 		tmr = &net->rxt_timer;
1954 		break;
1955 	case SCTP_TIMER_TYPE_HEARTBEAT:
1956 		/*
1957 		 * the net is used here so that we can add in the RTO. Even
1958 		 * though we use a different timer. We also add the HB timer
1959 		 * PLUS a random jitter.
1960 		 */
1961 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
1962 			return;
1963 		} else {
1964 			uint32_t rndval;
1965 			uint32_t jitter;
1966 
1967 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1968 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1969 				return;
1970 			}
1971 			if (net->RTO == 0) {
1972 				to_ticks = stcb->asoc.initial_rto;
1973 			} else {
1974 				to_ticks = net->RTO;
1975 			}
1976 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1977 			jitter = rndval % to_ticks;
1978 			if (jitter >= (to_ticks >> 1)) {
1979 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1980 			} else {
1981 				to_ticks = to_ticks - jitter;
1982 			}
1983 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1984 			    !(net->dest_state & SCTP_ADDR_PF)) {
1985 				to_ticks += net->heart_beat_delay;
1986 			}
1987 			/*
1988 			 * Now we must convert the to_ticks that are now in
1989 			 * ms to ticks.
1990 			 */
1991 			to_ticks = MSEC_TO_TICKS(to_ticks);
1992 			tmr = &net->hb_timer;
1993 		}
1994 		break;
1995 	case SCTP_TIMER_TYPE_COOKIE:
1996 		/*
1997 		 * Here we can use the RTO timer from the network since one
1998 		 * RTT was compelete. If a retran happened then we will be
1999 		 * using the RTO initial value.
2000 		 */
2001 		if ((stcb == NULL) || (net == NULL)) {
2002 			return;
2003 		}
2004 		if (net->RTO == 0) {
2005 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2006 		} else {
2007 			to_ticks = MSEC_TO_TICKS(net->RTO);
2008 		}
2009 		tmr = &net->rxt_timer;
2010 		break;
2011 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2012 		/*
2013 		 * nothing needed but the endpoint here ususually about 60
2014 		 * minutes.
2015 		 */
2016 		if (inp == NULL) {
2017 			return;
2018 		}
2019 		tmr = &inp->sctp_ep.signature_change;
2020 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2021 		break;
2022 	case SCTP_TIMER_TYPE_ASOCKILL:
2023 		if (stcb == NULL) {
2024 			return;
2025 		}
2026 		tmr = &stcb->asoc.strreset_timer;
2027 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2028 		break;
2029 	case SCTP_TIMER_TYPE_INPKILL:
2030 		/*
2031 		 * The inp is setup to die. We re-use the signature_chage
2032 		 * timer since that has stopped and we are in the GONE
2033 		 * state.
2034 		 */
2035 		if (inp == NULL) {
2036 			return;
2037 		}
2038 		tmr = &inp->sctp_ep.signature_change;
2039 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2040 		break;
2041 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2042 		/*
2043 		 * Here we use the value found in the EP for PMTU ususually
2044 		 * about 10 minutes.
2045 		 */
2046 		if ((stcb == NULL) || (inp == NULL)) {
2047 			return;
2048 		}
2049 		if (net == NULL) {
2050 			return;
2051 		}
2052 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2053 		tmr = &net->pmtu_timer;
2054 		break;
2055 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2056 		/* Here we use the RTO of the destination */
2057 		if ((stcb == NULL) || (net == NULL)) {
2058 			return;
2059 		}
2060 		if (net->RTO == 0) {
2061 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2062 		} else {
2063 			to_ticks = MSEC_TO_TICKS(net->RTO);
2064 		}
2065 		tmr = &net->rxt_timer;
2066 		break;
2067 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2068 		/*
2069 		 * Here we use the endpoints shutdown guard timer usually
2070 		 * about 3 minutes.
2071 		 */
2072 		if ((inp == NULL) || (stcb == NULL)) {
2073 			return;
2074 		}
2075 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2076 		tmr = &stcb->asoc.shut_guard_timer;
2077 		break;
2078 	case SCTP_TIMER_TYPE_STRRESET:
2079 		/*
2080 		 * Here the timer comes from the stcb but its value is from
2081 		 * the net's RTO.
2082 		 */
2083 		if ((stcb == NULL) || (net == NULL)) {
2084 			return;
2085 		}
2086 		if (net->RTO == 0) {
2087 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2088 		} else {
2089 			to_ticks = MSEC_TO_TICKS(net->RTO);
2090 		}
2091 		tmr = &stcb->asoc.strreset_timer;
2092 		break;
2093 	case SCTP_TIMER_TYPE_ASCONF:
2094 		/*
2095 		 * Here the timer comes from the stcb but its value is from
2096 		 * the net's RTO.
2097 		 */
2098 		if ((stcb == NULL) || (net == NULL)) {
2099 			return;
2100 		}
2101 		if (net->RTO == 0) {
2102 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2103 		} else {
2104 			to_ticks = MSEC_TO_TICKS(net->RTO);
2105 		}
2106 		tmr = &stcb->asoc.asconf_timer;
2107 		break;
2108 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2109 		if ((stcb == NULL) || (net != NULL)) {
2110 			return;
2111 		}
2112 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2113 		tmr = &stcb->asoc.delete_prim_timer;
2114 		break;
2115 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2116 		if (stcb == NULL) {
2117 			return;
2118 		}
2119 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2120 			/*
2121 			 * Really an error since stcb is NOT set to
2122 			 * autoclose
2123 			 */
2124 			return;
2125 		}
2126 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2127 		tmr = &stcb->asoc.autoclose_timer;
2128 		break;
2129 	default:
2130 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2131 		    __FUNCTION__, t_type);
2132 		return;
2133 		break;
2134 	};
2135 	if ((to_ticks <= 0) || (tmr == NULL)) {
2136 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2137 		    __FUNCTION__, t_type, to_ticks, tmr);
2138 		return;
2139 	}
2140 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2141 		/*
2142 		 * we do NOT allow you to have it already running. if it is
2143 		 * we leave the current one up unchanged
2144 		 */
2145 		return;
2146 	}
2147 	/* At this point we can proceed */
2148 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2149 		stcb->asoc.num_send_timers_up++;
2150 	}
2151 	tmr->stopped_from = 0;
2152 	tmr->type = t_type;
2153 	tmr->ep = (void *)inp;
2154 	tmr->tcb = (void *)stcb;
2155 	tmr->net = (void *)net;
2156 	tmr->self = (void *)tmr;
2157 	tmr->vnet = (void *)curvnet;
2158 	tmr->ticks = sctp_get_tick_count();
2159 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2160 	return;
2161 }
2162 
2163 void
2164 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2165     struct sctp_nets *net, uint32_t from)
2166 {
2167 	struct sctp_timer *tmr;
2168 
2169 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2170 	    (inp == NULL))
2171 		return;
2172 
2173 	tmr = NULL;
2174 	if (stcb) {
2175 		SCTP_TCB_LOCK_ASSERT(stcb);
2176 	}
2177 	switch (t_type) {
2178 	case SCTP_TIMER_TYPE_ZERO_COPY:
2179 		tmr = &inp->sctp_ep.zero_copy_timer;
2180 		break;
2181 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2182 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2183 		break;
2184 	case SCTP_TIMER_TYPE_ADDR_WQ:
2185 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2186 		break;
2187 	case SCTP_TIMER_TYPE_SEND:
2188 		if ((stcb == NULL) || (net == NULL)) {
2189 			return;
2190 		}
2191 		tmr = &net->rxt_timer;
2192 		break;
2193 	case SCTP_TIMER_TYPE_INIT:
2194 		if ((stcb == NULL) || (net == NULL)) {
2195 			return;
2196 		}
2197 		tmr = &net->rxt_timer;
2198 		break;
2199 	case SCTP_TIMER_TYPE_RECV:
2200 		if (stcb == NULL) {
2201 			return;
2202 		}
2203 		tmr = &stcb->asoc.dack_timer;
2204 		break;
2205 	case SCTP_TIMER_TYPE_SHUTDOWN:
2206 		if ((stcb == NULL) || (net == NULL)) {
2207 			return;
2208 		}
2209 		tmr = &net->rxt_timer;
2210 		break;
2211 	case SCTP_TIMER_TYPE_HEARTBEAT:
2212 		if ((stcb == NULL) || (net == NULL)) {
2213 			return;
2214 		}
2215 		tmr = &net->hb_timer;
2216 		break;
2217 	case SCTP_TIMER_TYPE_COOKIE:
2218 		if ((stcb == NULL) || (net == NULL)) {
2219 			return;
2220 		}
2221 		tmr = &net->rxt_timer;
2222 		break;
2223 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2224 		/* nothing needed but the endpoint here */
2225 		tmr = &inp->sctp_ep.signature_change;
2226 		/*
2227 		 * We re-use the newcookie timer for the INP kill timer. We
2228 		 * must assure that we do not kill it by accident.
2229 		 */
2230 		break;
2231 	case SCTP_TIMER_TYPE_ASOCKILL:
2232 		/*
2233 		 * Stop the asoc kill timer.
2234 		 */
2235 		if (stcb == NULL) {
2236 			return;
2237 		}
2238 		tmr = &stcb->asoc.strreset_timer;
2239 		break;
2240 
2241 	case SCTP_TIMER_TYPE_INPKILL:
2242 		/*
2243 		 * The inp is setup to die. We re-use the signature_chage
2244 		 * timer since that has stopped and we are in the GONE
2245 		 * state.
2246 		 */
2247 		tmr = &inp->sctp_ep.signature_change;
2248 		break;
2249 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2250 		if ((stcb == NULL) || (net == NULL)) {
2251 			return;
2252 		}
2253 		tmr = &net->pmtu_timer;
2254 		break;
2255 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2256 		if ((stcb == NULL) || (net == NULL)) {
2257 			return;
2258 		}
2259 		tmr = &net->rxt_timer;
2260 		break;
2261 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2262 		if (stcb == NULL) {
2263 			return;
2264 		}
2265 		tmr = &stcb->asoc.shut_guard_timer;
2266 		break;
2267 	case SCTP_TIMER_TYPE_STRRESET:
2268 		if (stcb == NULL) {
2269 			return;
2270 		}
2271 		tmr = &stcb->asoc.strreset_timer;
2272 		break;
2273 	case SCTP_TIMER_TYPE_ASCONF:
2274 		if (stcb == NULL) {
2275 			return;
2276 		}
2277 		tmr = &stcb->asoc.asconf_timer;
2278 		break;
2279 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2280 		if (stcb == NULL) {
2281 			return;
2282 		}
2283 		tmr = &stcb->asoc.delete_prim_timer;
2284 		break;
2285 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2286 		if (stcb == NULL) {
2287 			return;
2288 		}
2289 		tmr = &stcb->asoc.autoclose_timer;
2290 		break;
2291 	default:
2292 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2293 		    __FUNCTION__, t_type);
2294 		break;
2295 	};
2296 	if (tmr == NULL) {
2297 		return;
2298 	}
2299 	if ((tmr->type != t_type) && tmr->type) {
2300 		/*
2301 		 * Ok we have a timer that is under joint use. Cookie timer
2302 		 * per chance with the SEND timer. We therefore are NOT
2303 		 * running the timer that the caller wants stopped.  So just
2304 		 * return.
2305 		 */
2306 		return;
2307 	}
2308 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2309 		stcb->asoc.num_send_timers_up--;
2310 		if (stcb->asoc.num_send_timers_up < 0) {
2311 			stcb->asoc.num_send_timers_up = 0;
2312 		}
2313 	}
2314 	tmr->self = NULL;
2315 	tmr->stopped_from = from;
2316 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2317 	return;
2318 }
2319 
2320 uint32_t
2321 sctp_calculate_len(struct mbuf *m)
2322 {
2323 	uint32_t tlen = 0;
2324 	struct mbuf *at;
2325 
2326 	at = m;
2327 	while (at) {
2328 		tlen += SCTP_BUF_LEN(at);
2329 		at = SCTP_BUF_NEXT(at);
2330 	}
2331 	return (tlen);
2332 }
2333 
2334 void
2335 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2336     struct sctp_association *asoc, uint32_t mtu)
2337 {
2338 	/*
2339 	 * Reset the P-MTU size on this association, this involves changing
2340 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2341 	 * allow the DF flag to be cleared.
2342 	 */
2343 	struct sctp_tmit_chunk *chk;
2344 	unsigned int eff_mtu, ovh;
2345 
2346 	asoc->smallest_mtu = mtu;
2347 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2348 		ovh = SCTP_MIN_OVERHEAD;
2349 	} else {
2350 		ovh = SCTP_MIN_V4_OVERHEAD;
2351 	}
2352 	eff_mtu = mtu - ovh;
2353 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2354 		if (chk->send_size > eff_mtu) {
2355 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2356 		}
2357 	}
2358 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2359 		if (chk->send_size > eff_mtu) {
2360 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2361 		}
2362 	}
2363 }
2364 
2365 
2366 /*
2367  * given an association and starting time of the current RTT period return
2368  * RTO in number of msecs net should point to the current network
2369  */
2370 
2371 uint32_t
2372 sctp_calculate_rto(struct sctp_tcb *stcb,
2373     struct sctp_association *asoc,
2374     struct sctp_nets *net,
2375     struct timeval *told,
2376     int safe, int rtt_from_sack)
2377 {
2378 	/*-
2379 	 * given an association and the starting time of the current RTT
2380 	 * period (in value1/value2) return RTO in number of msecs.
2381 	 */
2382 	int32_t rtt;		/* RTT in ms */
2383 	uint32_t new_rto;
2384 	int first_measure = 0;
2385 	struct timeval now, then, *old;
2386 
2387 	/* Copy it out for sparc64 */
2388 	if (safe == sctp_align_unsafe_makecopy) {
2389 		old = &then;
2390 		memcpy(&then, told, sizeof(struct timeval));
2391 	} else if (safe == sctp_align_safe_nocopy) {
2392 		old = told;
2393 	} else {
2394 		/* error */
2395 		SCTP_PRINTF("Huh, bad rto calc call\n");
2396 		return (0);
2397 	}
2398 	/************************/
2399 	/* 1. calculate new RTT */
2400 	/************************/
2401 	/* get the current time */
2402 	if (stcb->asoc.use_precise_time) {
2403 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2404 	} else {
2405 		(void)SCTP_GETTIME_TIMEVAL(&now);
2406 	}
2407 	timevalsub(&now, old);
2408 	/* store the current RTT in us */
2409 	net->rtt = (uint64_t) 10000000 *(uint64_t) now.tv_sec +
2410 	         (uint64_t) now.tv_usec;
2411 
2412 	/* computer rtt in ms */
2413 	rtt = net->rtt / 1000;
2414 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2415 		/*
2416 		 * Tell the CC module that a new update has just occurred
2417 		 * from a sack
2418 		 */
2419 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2420 	}
2421 	/*
2422 	 * Do we need to determine the lan? We do this only on sacks i.e.
2423 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2424 	 */
2425 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2426 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2427 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2428 			net->lan_type = SCTP_LAN_INTERNET;
2429 		} else {
2430 			net->lan_type = SCTP_LAN_LOCAL;
2431 		}
2432 	}
2433 	/***************************/
2434 	/* 2. update RTTVAR & SRTT */
2435 	/***************************/
2436 	/*-
2437 	 * Compute the scaled average lastsa and the
2438 	 * scaled variance lastsv as described in van Jacobson
2439 	 * Paper "Congestion Avoidance and Control", Annex A.
2440 	 *
2441 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2442 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2443 	 */
2444 	if (net->RTO_measured) {
2445 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2446 		net->lastsa += rtt;
2447 		if (rtt < 0) {
2448 			rtt = -rtt;
2449 		}
2450 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2451 		net->lastsv += rtt;
2452 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2453 			rto_logging(net, SCTP_LOG_RTTVAR);
2454 		}
2455 	} else {
2456 		/* First RTO measurment */
2457 		net->RTO_measured = 1;
2458 		first_measure = 1;
2459 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2460 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2461 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2462 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2463 		}
2464 	}
2465 	if (net->lastsv == 0) {
2466 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2467 	}
2468 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2469 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2470 	    (stcb->asoc.sat_network_lockout == 0)) {
2471 		stcb->asoc.sat_network = 1;
2472 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2473 		stcb->asoc.sat_network = 0;
2474 		stcb->asoc.sat_network_lockout = 1;
2475 	}
2476 	/* bound it, per C6/C7 in Section 5.3.1 */
2477 	if (new_rto < stcb->asoc.minrto) {
2478 		new_rto = stcb->asoc.minrto;
2479 	}
2480 	if (new_rto > stcb->asoc.maxrto) {
2481 		new_rto = stcb->asoc.maxrto;
2482 	}
2483 	/* we are now returning the RTO */
2484 	return (new_rto);
2485 }
2486 
2487 /*
2488  * return a pointer to a contiguous piece of data from the given mbuf chain
2489  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2490  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2491  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2492  */
2493 caddr_t
2494 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2495 {
2496 	uint32_t count;
2497 	uint8_t *ptr;
2498 
2499 	ptr = in_ptr;
2500 	if ((off < 0) || (len <= 0))
2501 		return (NULL);
2502 
2503 	/* find the desired start location */
2504 	while ((m != NULL) && (off > 0)) {
2505 		if (off < SCTP_BUF_LEN(m))
2506 			break;
2507 		off -= SCTP_BUF_LEN(m);
2508 		m = SCTP_BUF_NEXT(m);
2509 	}
2510 	if (m == NULL)
2511 		return (NULL);
2512 
2513 	/* is the current mbuf large enough (eg. contiguous)? */
2514 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2515 		return (mtod(m, caddr_t)+off);
2516 	} else {
2517 		/* else, it spans more than one mbuf, so save a temp copy... */
2518 		while ((m != NULL) && (len > 0)) {
2519 			count = min(SCTP_BUF_LEN(m) - off, len);
2520 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2521 			len -= count;
2522 			ptr += count;
2523 			off = 0;
2524 			m = SCTP_BUF_NEXT(m);
2525 		}
2526 		if ((m == NULL) && (len > 0))
2527 			return (NULL);
2528 		else
2529 			return ((caddr_t)in_ptr);
2530 	}
2531 }
2532 
2533 
2534 
2535 struct sctp_paramhdr *
2536 sctp_get_next_param(struct mbuf *m,
2537     int offset,
2538     struct sctp_paramhdr *pull,
2539     int pull_limit)
2540 {
2541 	/* This just provides a typed signature to Peter's Pull routine */
2542 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2543 	    (uint8_t *) pull));
2544 }
2545 
2546 
2547 int
2548 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2549 {
2550 	/*
2551 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2552 	 * padlen is > 3 this routine will fail.
2553 	 */
2554 	uint8_t *dp;
2555 	int i;
2556 
2557 	if (padlen > 3) {
2558 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2559 		return (ENOBUFS);
2560 	}
2561 	if (padlen <= M_TRAILINGSPACE(m)) {
2562 		/*
2563 		 * The easy way. We hope the majority of the time we hit
2564 		 * here :)
2565 		 */
2566 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2567 		SCTP_BUF_LEN(m) += padlen;
2568 	} else {
2569 		/* Hard way we must grow the mbuf */
2570 		struct mbuf *tmp;
2571 
2572 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2573 		if (tmp == NULL) {
2574 			/* Out of space GAK! we are in big trouble. */
2575 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2576 			return (ENOSPC);
2577 		}
2578 		/* setup and insert in middle */
2579 		SCTP_BUF_LEN(tmp) = padlen;
2580 		SCTP_BUF_NEXT(tmp) = NULL;
2581 		SCTP_BUF_NEXT(m) = tmp;
2582 		dp = mtod(tmp, uint8_t *);
2583 	}
2584 	/* zero out the pad */
2585 	for (i = 0; i < padlen; i++) {
2586 		*dp = 0;
2587 		dp++;
2588 	}
2589 	return (0);
2590 }
2591 
2592 int
2593 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2594 {
2595 	/* find the last mbuf in chain and pad it */
2596 	struct mbuf *m_at;
2597 
2598 	m_at = m;
2599 	if (last_mbuf) {
2600 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2601 	} else {
2602 		while (m_at) {
2603 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2604 				return (sctp_add_pad_tombuf(m_at, padval));
2605 			}
2606 			m_at = SCTP_BUF_NEXT(m_at);
2607 		}
2608 	}
2609 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2610 	return (EFAULT);
2611 }
2612 
/*
 * Build and queue an SCTP_ASSOC_CHANGE notification (state 'event', cause
 * 'error') onto the socket's read queue.  For one-to-one style sockets a
 * COMM_LOST/CANT_STR_ASSOC additionally sets so_error and wakes all
 * sleepers so blocked socket calls fail immediately.  'data' is currently
 * unused here.  'so_locked' says whether the caller already holds the
 * socket lock (only meaningful on platforms that need the lock dance
 * below).
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			/* Failed before the handshake completed. */
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Pin the assoc with a refcount while dropping the TCB
		 * lock to acquire the socket lock, then re-take the TCB
		 * lock; bail if the socket was closed in the meantime.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		socantrcvmore(stcb->sctp_socket);
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* Fill in the sctp_assoc_change event structure. */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	/* Wrap the notification and append it to the socket read queue. */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* Same refcount/lock-swap dance as above. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2730 
2731 static void
2732 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2733     struct sockaddr *sa, uint32_t error)
2734 {
2735 	struct mbuf *m_notify;
2736 	struct sctp_paddr_change *spc;
2737 	struct sctp_queued_to_read *control;
2738 
2739 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2740 		/* event not enabled */
2741 		return;
2742 	}
2743 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2744 	if (m_notify == NULL)
2745 		return;
2746 	SCTP_BUF_LEN(m_notify) = 0;
2747 	spc = mtod(m_notify, struct sctp_paddr_change *);
2748 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2749 	spc->spc_flags = 0;
2750 	spc->spc_length = sizeof(struct sctp_paddr_change);
2751 	switch (sa->sa_family) {
2752 #ifdef INET
2753 	case AF_INET:
2754 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2755 		break;
2756 #endif
2757 #ifdef INET6
2758 	case AF_INET6:
2759 		{
2760 			struct sockaddr_in6 *sin6;
2761 
2762 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2763 
2764 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2765 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2766 				if (sin6->sin6_scope_id == 0) {
2767 					/* recover scope_id for user */
2768 					(void)sa6_recoverscope(sin6);
2769 				} else {
2770 					/* clear embedded scope_id for user */
2771 					in6_clearscope(&sin6->sin6_addr);
2772 				}
2773 			}
2774 			break;
2775 		}
2776 #endif
2777 	default:
2778 		/* TSNH */
2779 		break;
2780 	}
2781 	spc->spc_state = state;
2782 	spc->spc_error = error;
2783 	spc->spc_assoc_id = sctp_get_associd(stcb);
2784 
2785 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2786 	SCTP_BUF_NEXT(m_notify) = NULL;
2787 
2788 	/* append to socket */
2789 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2790 	    0, 0, 0, 0, 0, 0,
2791 	    m_notify);
2792 	if (control == NULL) {
2793 		/* no memory */
2794 		sctp_m_freem(m_notify);
2795 		return;
2796 	}
2797 	control->length = SCTP_BUF_LEN(m_notify);
2798 	control->spec_flags = M_NOTIFICATION;
2799 	/* not that we need this */
2800 	control->tail_mbuf = m_notify;
2801 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2802 	    control,
2803 	    &stcb->sctp_socket->so_rcv, 1,
2804 	    SCTP_READ_LOCK_NOT_HELD,
2805 	    SCTP_SO_NOT_LOCKED);
2806 }
2807 
2808 
/*
 * Build and queue an SCTP_SEND_FAILED notification for a chunk that could
 * not be delivered.  The chunk's payload mbufs are stolen (chk->data is
 * set to NULL) and linked behind the notification header; 'error'
 * selects SCTP_DATA_UNSENT vs SCTP_DATA_SENT.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/*
	 * User-visible length: notification header plus payload, minus
	 * the DATA chunk header that is trimmed off below.
	 */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	/* Steal the payload: link it behind the notification header. */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* Freeing m_notify also releases the stolen data chain. */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2890 
2891 
/*
 * Build and queue an SCTP_SEND_FAILED notification for a message still
 * sitting on a stream output queue (never chunked).  The pending data is
 * stolen (sp->data is set to NULL) and linked behind the notification
 * header; 'error' selects SCTP_DATA_UNSENT vs SCTP_DATA_SENT.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	/* Notification header plus the pending data length. */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		/* part of the message was already chunked and sent */
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* Link the pending data behind the notification header. */
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* Freeing m_notify also releases the stolen data chain. */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
2964 
2965 
2966 
2967 static void
2968 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
2969     uint32_t error)
2970 {
2971 	struct mbuf *m_notify;
2972 	struct sctp_adaptation_event *sai;
2973 	struct sctp_queued_to_read *control;
2974 
2975 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
2976 		/* event not enabled */
2977 		return;
2978 	}
2979 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
2980 	if (m_notify == NULL)
2981 		/* no space left */
2982 		return;
2983 	SCTP_BUF_LEN(m_notify) = 0;
2984 	sai = mtod(m_notify, struct sctp_adaptation_event *);
2985 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
2986 	sai->sai_flags = 0;
2987 	sai->sai_length = sizeof(struct sctp_adaptation_event);
2988 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
2989 	sai->sai_assoc_id = sctp_get_associd(stcb);
2990 
2991 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
2992 	SCTP_BUF_NEXT(m_notify) = NULL;
2993 
2994 	/* append to socket */
2995 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2996 	    0, 0, 0, 0, 0, 0,
2997 	    m_notify);
2998 	if (control == NULL) {
2999 		/* no memory */
3000 		sctp_m_freem(m_notify);
3001 		return;
3002 	}
3003 	control->length = SCTP_BUF_LEN(m_notify);
3004 	control->spec_flags = M_NOTIFICATION;
3005 	/* not that we need this */
3006 	control->tail_mbuf = m_notify;
3007 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3008 	    control,
3009 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3010 }
3011 
/* This always must be called with the read-queue LOCKED in the INP */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* socket can no longer be read from; drop the event */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	/* Build the SCTP_PARTIAL_DELIVERY_EVENT notification in the mbuf. */
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* val packs the stream id in the upper 16 bits and the seq in the lower */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	/* length is zeroed here and re-accounted via atomic_add_int() below */
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* charge the notification against the socket receive buffer */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	/*
	 * Insert the event directly behind the partially delivered message
	 * it refers to, so the reader sees them in order.
	 */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Drop the TCB lock before taking the socket lock
			 * (lock ordering); hold a refcount so the assoc
			 * cannot be freed in between.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3105 
/*
 * Mark one-to-one style sockets as unable to send further data and, if the
 * ULP subscribed, queue an SCTP_SHUTDOWN_EVENT notification.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/* take a refcount so the assoc survives the lock juggling */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* Build the SCTP_SHUTDOWN_EVENT notification in the mbuf. */
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3173 
3174 static void
3175 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3176     int so_locked
3177 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3178     SCTP_UNUSED
3179 #endif
3180 )
3181 {
3182 	struct mbuf *m_notify;
3183 	struct sctp_sender_dry_event *event;
3184 	struct sctp_queued_to_read *control;
3185 
3186 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3187 		/* event not enabled */
3188 		return;
3189 	}
3190 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3191 	if (m_notify == NULL) {
3192 		/* no space left */
3193 		return;
3194 	}
3195 	SCTP_BUF_LEN(m_notify) = 0;
3196 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3197 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3198 	event->sender_dry_flags = 0;
3199 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3200 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3201 
3202 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3203 	SCTP_BUF_NEXT(m_notify) = NULL;
3204 
3205 	/* append to socket */
3206 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3207 	    0, 0, 0, 0, 0, 0, m_notify);
3208 	if (control == NULL) {
3209 		/* no memory */
3210 		sctp_m_freem(m_notify);
3211 		return;
3212 	}
3213 	control->length = SCTP_BUF_LEN(m_notify);
3214 	control->spec_flags = M_NOTIFICATION;
3215 	/* not that we need this */
3216 	control->tail_mbuf = m_notify;
3217 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3218 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3219 }
3220 
3221 
3222 static void
3223 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3224 {
3225 	struct mbuf *m_notify;
3226 	struct sctp_queued_to_read *control;
3227 	struct sctp_stream_reset_event *strreset;
3228 	int len;
3229 
3230 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3231 		/* event not enabled */
3232 		return;
3233 	}
3234 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3235 	if (m_notify == NULL)
3236 		/* no space left */
3237 		return;
3238 	SCTP_BUF_LEN(m_notify) = 0;
3239 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3240 	if (len > M_TRAILINGSPACE(m_notify)) {
3241 		/* never enough room */
3242 		sctp_m_freem(m_notify);
3243 		return;
3244 	}
3245 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3246 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3247 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3248 	strreset->strreset_length = len;
3249 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3250 	strreset->strreset_list[0] = number_entries;
3251 
3252 	SCTP_BUF_LEN(m_notify) = len;
3253 	SCTP_BUF_NEXT(m_notify) = NULL;
3254 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3255 		/* no space */
3256 		sctp_m_freem(m_notify);
3257 		return;
3258 	}
3259 	/* append to socket */
3260 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3261 	    0, 0, 0, 0, 0, 0,
3262 	    m_notify);
3263 	if (control == NULL) {
3264 		/* no memory */
3265 		sctp_m_freem(m_notify);
3266 		return;
3267 	}
3268 	control->spec_flags = M_NOTIFICATION;
3269 	control->length = SCTP_BUF_LEN(m_notify);
3270 	/* not that we need this */
3271 	control->tail_mbuf = m_notify;
3272 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3273 	    control,
3274 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3275 }
3276 
3277 
3278 static void
3279 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3280     int number_entries, uint16_t * list, int flag)
3281 {
3282 	struct mbuf *m_notify;
3283 	struct sctp_queued_to_read *control;
3284 	struct sctp_stream_reset_event *strreset;
3285 	int len;
3286 
3287 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3288 		/* event not enabled */
3289 		return;
3290 	}
3291 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3292 	if (m_notify == NULL)
3293 		/* no space left */
3294 		return;
3295 	SCTP_BUF_LEN(m_notify) = 0;
3296 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3297 	if (len > M_TRAILINGSPACE(m_notify)) {
3298 		/* never enough room */
3299 		sctp_m_freem(m_notify);
3300 		return;
3301 	}
3302 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3303 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3304 	if (number_entries == 0) {
3305 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3306 	} else {
3307 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3308 	}
3309 	strreset->strreset_length = len;
3310 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3311 	if (number_entries) {
3312 		int i;
3313 
3314 		for (i = 0; i < number_entries; i++) {
3315 			strreset->strreset_list[i] = ntohs(list[i]);
3316 		}
3317 	}
3318 	SCTP_BUF_LEN(m_notify) = len;
3319 	SCTP_BUF_NEXT(m_notify) = NULL;
3320 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3321 		/* no space */
3322 		sctp_m_freem(m_notify);
3323 		return;
3324 	}
3325 	/* append to socket */
3326 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3327 	    0, 0, 0, 0, 0, 0,
3328 	    m_notify);
3329 	if (control == NULL) {
3330 		/* no memory */
3331 		sctp_m_freem(m_notify);
3332 		return;
3333 	}
3334 	control->spec_flags = M_NOTIFICATION;
3335 	control->length = SCTP_BUF_LEN(m_notify);
3336 	/* not that we need this */
3337 	control->tail_mbuf = m_notify;
3338 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3339 	    control,
3340 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3341 }
3342 
3343 
3344 void
3345 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3346     uint32_t error, void *data, int so_locked
3347 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3348     SCTP_UNUSED
3349 #endif
3350 )
3351 {
3352 	if ((stcb == NULL) ||
3353 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3354 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3355 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3356 		/* If the socket is gone we are out of here */
3357 		return;
3358 	}
3359 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3360 		return;
3361 	}
3362 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3363 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3364 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3365 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3366 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3367 			/* Don't report these in front states */
3368 			return;
3369 		}
3370 	}
3371 	switch (notification) {
3372 	case SCTP_NOTIFY_ASSOC_UP:
3373 		if (stcb->asoc.assoc_up_sent == 0) {
3374 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3375 			stcb->asoc.assoc_up_sent = 1;
3376 		}
3377 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3378 			sctp_notify_adaptation_layer(stcb, error);
3379 		}
3380 		if (stcb->asoc.peer_supports_auth == 0) {
3381 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3382 			    NULL, so_locked);
3383 		}
3384 		break;
3385 	case SCTP_NOTIFY_ASSOC_DOWN:
3386 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3387 		break;
3388 	case SCTP_NOTIFY_INTERFACE_DOWN:
3389 		{
3390 			struct sctp_nets *net;
3391 
3392 			net = (struct sctp_nets *)data;
3393 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3394 			    (struct sockaddr *)&net->ro._l_addr, error);
3395 			break;
3396 		}
3397 	case SCTP_NOTIFY_INTERFACE_UP:
3398 		{
3399 			struct sctp_nets *net;
3400 
3401 			net = (struct sctp_nets *)data;
3402 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3403 			    (struct sockaddr *)&net->ro._l_addr, error);
3404 			break;
3405 		}
3406 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3407 		{
3408 			struct sctp_nets *net;
3409 
3410 			net = (struct sctp_nets *)data;
3411 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3412 			    (struct sockaddr *)&net->ro._l_addr, error);
3413 			break;
3414 		}
3415 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3416 		sctp_notify_send_failed2(stcb, error,
3417 		    (struct sctp_stream_queue_pending *)data, so_locked);
3418 		break;
3419 	case SCTP_NOTIFY_DG_FAIL:
3420 		sctp_notify_send_failed(stcb, error,
3421 		    (struct sctp_tmit_chunk *)data, so_locked);
3422 		break;
3423 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3424 		{
3425 			uint32_t val;
3426 
3427 			val = *((uint32_t *) data);
3428 
3429 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3430 			break;
3431 		}
3432 	case SCTP_NOTIFY_STRDATA_ERR:
3433 		break;
3434 	case SCTP_NOTIFY_ASSOC_ABORTED:
3435 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3436 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3437 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3438 		} else {
3439 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3440 		}
3441 		break;
3442 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3443 		break;
3444 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3445 		break;
3446 	case SCTP_NOTIFY_ASSOC_RESTART:
3447 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3448 		if (stcb->asoc.peer_supports_auth == 0) {
3449 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3450 			    NULL, so_locked);
3451 		}
3452 		break;
3453 	case SCTP_NOTIFY_HB_RESP:
3454 		break;
3455 	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
3456 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
3457 		break;
3458 	case SCTP_NOTIFY_STR_RESET_ADD_OK:
3459 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
3460 		break;
3461 	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
3462 		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
3463 		break;
3464 
3465 	case SCTP_NOTIFY_STR_RESET_SEND:
3466 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3467 		break;
3468 	case SCTP_NOTIFY_STR_RESET_RECV:
3469 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3470 		break;
3471 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3472 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3473 		break;
3474 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3475 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3476 		break;
3477 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3478 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3479 		    error);
3480 		break;
3481 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3482 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3483 		    error);
3484 		break;
3485 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3486 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3487 		    error);
3488 		break;
3489 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3490 		break;
3491 	case SCTP_NOTIFY_ASCONF_FAILED:
3492 		break;
3493 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3494 		sctp_notify_shutdown_event(stcb);
3495 		break;
3496 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3497 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3498 		    (uint16_t) (uintptr_t) data,
3499 		    so_locked);
3500 		break;
3501 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3502 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3503 		    (uint16_t) (uintptr_t) data,
3504 		    so_locked);
3505 		break;
3506 	case SCTP_NOTIFY_NO_PEER_AUTH:
3507 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3508 		    (uint16_t) (uintptr_t) data,
3509 		    so_locked);
3510 		break;
3511 	case SCTP_NOTIFY_SENDER_DRY:
3512 		sctp_notify_sender_dry_event(stcb, so_locked);
3513 		break;
3514 	default:
3515 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3516 		    __FUNCTION__, notification, notification);
3517 		break;
3518 	}			/* end switch */
3519 }
3520 
/*
 * Drain every chunk still queued for sending on this association (sent
 * queue, send queue, and each stream's pending queue), notifying the ULP
 * of each failed/unsent datagram as it goes.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* the notify may consume chk->data; re-check below */
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* the notify may consume chk->data; re-check below */
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				/* the notify may consume sp->data; re-check below */
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3614 
3615 void
3616 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3617 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3618     SCTP_UNUSED
3619 #endif
3620 )
3621 {
3622 
3623 	if (stcb == NULL) {
3624 		return;
3625 	}
3626 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3627 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3628 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3629 		return;
3630 	}
3631 	/* Tell them we lost the asoc */
3632 	sctp_report_all_outbound(stcb, 1, so_locked);
3633 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3634 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3635 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3636 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3637 	}
3638 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3639 }
3640 
/*
 * Abort an association in response to an inbound packet: notify the ULP
 * (when a TCB exists), send an ABORT to the peer, and free the TCB.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Take the socket lock with the TCB lock dropped (lock
		 * ordering) while holding a refcount on the association.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* established associations count against the gauge */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3684 
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Dump the per-association inbound/outbound TSN track logs to the console.
 * NOTE(review): the inner guard is spelled "NOSIY_PRINTS" -- presumably a
 * typo for "NOISY_PRINTS"; as written the body can only be enabled by
 * defining the misspelled macro.  Left untouched to avoid changing the
 * behavior of any build that defines it.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* when the circular log has wrapped, print the older tail first */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* same wrapped-first ordering for the outbound log */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3748 
/*
 * Abort an existing association: notify the ULP, send an ABORT chunk to
 * the peer, and free the association state.  With stcb == NULL only the
 * endpoint cleanup (if the socket is gone) is attempted.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last assoc gone on a dead socket: free the PCB */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* established associations count against the gauge */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		/*
		 * Take the socket lock with the TCB lock dropped (lock
		 * ordering); a refcount keeps the assoc alive in between.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3811 
3812 void
3813 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3814     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3815 {
3816 	struct sctp_chunkhdr *ch, chunk_buf;
3817 	unsigned int chk_length;
3818 
3819 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3820 	/* Generate a TO address for future reference */
3821 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3822 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3823 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3824 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3825 		}
3826 	}
3827 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3828 	    sizeof(*ch), (uint8_t *) & chunk_buf);
3829 	while (ch != NULL) {
3830 		chk_length = ntohs(ch->chunk_length);
3831 		if (chk_length < sizeof(*ch)) {
3832 			/* break to abort land */
3833 			break;
3834 		}
3835 		switch (ch->chunk_type) {
3836 		case SCTP_COOKIE_ECHO:
3837 			/* We hit here only if the assoc is being freed */
3838 			return;
3839 		case SCTP_PACKET_DROPPED:
3840 			/* we don't respond to pkt-dropped */
3841 			return;
3842 		case SCTP_ABORT_ASSOCIATION:
3843 			/* we don't respond with an ABORT to an ABORT */
3844 			return;
3845 		case SCTP_SHUTDOWN_COMPLETE:
3846 			/*
3847 			 * we ignore it since we are not waiting for it and
3848 			 * peer is gone
3849 			 */
3850 			return;
3851 		case SCTP_SHUTDOWN_ACK:
3852 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
3853 			return;
3854 		default:
3855 			break;
3856 		}
3857 		offset += SCTP_SIZE32(chk_length);
3858 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3859 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3860 	}
3861 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
3862 }
3863 
3864 /*
3865  * check the inbound datagram to make sure there is not an abort inside it,
3866  * if there is return 1, else return 0.
3867  */
3868 int
3869 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
3870 {
3871 	struct sctp_chunkhdr *ch;
3872 	struct sctp_init_chunk *init_chk, chunk_buf;
3873 	int offset;
3874 	unsigned int chk_length;
3875 
3876 	offset = iphlen + sizeof(struct sctphdr);
3877 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
3878 	    (uint8_t *) & chunk_buf);
3879 	while (ch != NULL) {
3880 		chk_length = ntohs(ch->chunk_length);
3881 		if (chk_length < sizeof(*ch)) {
3882 			/* packet is probably corrupt */
3883 			break;
3884 		}
3885 		/* we seem to be ok, is it an abort? */
3886 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
3887 			/* yep, tell them */
3888 			return (1);
3889 		}
3890 		if (ch->chunk_type == SCTP_INITIATION) {
3891 			/* need to update the Vtag */
3892 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
3893 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
3894 			if (init_chk != NULL) {
3895 				*vtagfill = ntohl(init_chk->init.initiate_tag);
3896 			}
3897 		}
3898 		/* Nope, move to the next chunk */
3899 		offset += SCTP_SIZE32(chk_length);
3900 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3901 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3902 	}
3903 	return (0);
3904 }
3905 
3906 /*
3907  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
3908  * set (i.e. it's 0) so, create this function to compare link local scopes
3909  */
3910 #ifdef INET6
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
	struct sockaddr_in6 one, two;

	/* work on copies so the callers' structures stay untouched */
	one = *addr1;
	two = *addr2;

	if ((one.sin6_scope_id == 0) && sa6_recoverscope(&one)) {
		/* can't get scope, so can't match */
		return (0);
	}
	if ((two.sin6_scope_id == 0) && sa6_recoverscope(&two)) {
		/* can't get scope, so can't match */
		return (0);
	}
	/* same scope id means same link-local scope */
	return ((one.sin6_scope_id == two.sin6_scope_id) ? 1 : 0);
}
3935 
3936 /*
3937  * returns a sockaddr_in6 with embedded scope recovered and removed
3938  */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if (addr->sin6_family != AF_INET6) {
		return (addr);
	}
	if (!IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
		return (addr);
	}
	if (addr->sin6_scope_id == 0) {
		*store = *addr;
		if (!sa6_recoverscope(store)) {
			/* use the recovered scope */
			addr = store;
		}
	} else {
		/* scope id already set; strip the embedded form in place */
		in6_clearscope(&addr->sin6_addr);
	}
	return (addr);
}
3959 
3960 #endif
3961 
3962 /*
3963  * are the two addresses the same?  currently a "scopeless" check returns: 1
3964  * if same, 0 if not
3965  */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{
	/* both pointers must be valid */
	if ((sa1 == NULL) || (sa2 == NULL)) {
		return (0);
	}
	/* and belong to the same address family */
	if (sa1->sa_family != sa2->sa_family) {
		return (0);
	}
	switch (sa1->sa_family) {
#ifdef INET6
	case AF_INET6:
		/* IPv6 addresses, scope-less comparison */
		return (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)sa1,
		    (struct sockaddr_in6 *)sa2));
#endif
#ifdef INET
	case AF_INET:
		/* IPv4 addresses compare as raw 32-bit values */
		return (((struct sockaddr_in *)sa1)->sin_addr.s_addr ==
		    ((struct sockaddr_in *)sa2)->sin_addr.s_addr);
#endif
	default:
		/* we don't do these... */
		return (0);
	}
}
4007 
void
sctp_print_address(struct sockaddr *sa)
{
	/* Log a human-readable form of sa (address, port, scope). */
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];

	ip6buf[0] = 0;
#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;

			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
			break;
		}
#endif
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin = (struct sockaddr_in *)sa;
			/* print the address one octet at a time */
			unsigned char *b = (unsigned char *)&sin->sin_addr;

			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
			    b[0], b[1], b[2], b[3], ntohs(sin->sin_port));
			break;
		}
#endif
	default:
		/* unknown family */
		SCTP_PRINTF("?\n");
		break;
	}
}
4049 
4050 void
4051 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4052 {
4053 	switch (iph->ip_v) {
4054 #ifdef INET
4055 	case IPVERSION:
4056 		{
4057 			struct sockaddr_in lsa, fsa;
4058 
4059 			bzero(&lsa, sizeof(lsa));
4060 			lsa.sin_len = sizeof(lsa);
4061 			lsa.sin_family = AF_INET;
4062 			lsa.sin_addr = iph->ip_src;
4063 			lsa.sin_port = sh->src_port;
4064 			bzero(&fsa, sizeof(fsa));
4065 			fsa.sin_len = sizeof(fsa);
4066 			fsa.sin_family = AF_INET;
4067 			fsa.sin_addr = iph->ip_dst;
4068 			fsa.sin_port = sh->dest_port;
4069 			SCTP_PRINTF("src: ");
4070 			sctp_print_address((struct sockaddr *)&lsa);
4071 			SCTP_PRINTF("dest: ");
4072 			sctp_print_address((struct sockaddr *)&fsa);
4073 			break;
4074 		}
4075 #endif
4076 #ifdef INET6
4077 	case IPV6_VERSION >> 4:
4078 		{
4079 			struct ip6_hdr *ip6;
4080 			struct sockaddr_in6 lsa6, fsa6;
4081 
4082 			ip6 = (struct ip6_hdr *)iph;
4083 			bzero(&lsa6, sizeof(lsa6));
4084 			lsa6.sin6_len = sizeof(lsa6);
4085 			lsa6.sin6_family = AF_INET6;
4086 			lsa6.sin6_addr = ip6->ip6_src;
4087 			lsa6.sin6_port = sh->src_port;
4088 			bzero(&fsa6, sizeof(fsa6));
4089 			fsa6.sin6_len = sizeof(fsa6);
4090 			fsa6.sin6_family = AF_INET6;
4091 			fsa6.sin6_addr = ip6->ip6_dst;
4092 			fsa6.sin6_port = sh->dest_port;
4093 			SCTP_PRINTF("src: ");
4094 			sctp_print_address((struct sockaddr *)&lsa6);
4095 			SCTP_PRINTF("dest: ");
4096 			sctp_print_address((struct sockaddr *)&fsa6);
4097 			break;
4098 		}
4099 #endif
4100 	default:
4101 		/* TSNH */
4102 		break;
4103 	}
4104 }
4105 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp (used when an
	 * association is moved to its own socket, i.e. peeloff/accept).
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* grab the old socket's receive-buffer lock before touching it */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* un-charge every mbuf from the old receive buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	/* charge each moved mbuf to the new socket's receive buffer */
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4181 
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* no reader left: free the data instead of queueing it */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		/* only real user data counts toward the recv statistics */
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* walk the chain: drop empty mbufs, charge the rest to sb */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		/* last fragment has arrived; message is complete */
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* finally, wake up any blocked reader */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/* acquire the socket lock; hold a ref so the
				 * assoc cannot be freed while unlocked */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4303 
4304 
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 *
	 * Returns 0 on success, -1 if there is nothing valid to append to.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		/* common error exit: drop the lock and report failure */
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* no reader left; silently succeed without queueing */
		SCTP_INP_READ_UNLOCK(inp);
		return 0;
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* walk the new chain: drop empty mbufs, charge the rest to sb */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* wake any reader now that the data is visible */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4450 
4451 
4452 
4453 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4454  *************ALTERNATE ROUTING CODE
4455  */
4456 
4457 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4458  *************ALTERNATE ROUTING CODE
4459  */
4460 
4461 struct mbuf *
4462 sctp_generate_invmanparam(int err)
4463 {
4464 	/* Return a MBUF with a invalid mandatory parameter */
4465 	struct mbuf *m;
4466 
4467 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4468 	if (m) {
4469 		struct sctp_paramhdr *ph;
4470 
4471 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4472 		ph = mtod(m, struct sctp_paramhdr *);
4473 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4474 		ph->param_type = htons(err);
4475 	}
4476 	return (m);
4477 }
4478 
4479 #ifdef SCTP_MBCNT_LOGGING
4480 void
4481 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4482     struct sctp_tmit_chunk *tp1, int chk_cnt)
4483 {
4484 	if (tp1->data == NULL) {
4485 		return;
4486 	}
4487 	asoc->chunks_on_out_queue -= chk_cnt;
4488 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4489 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4490 		    asoc->total_output_queue_size,
4491 		    tp1->book_size,
4492 		    0,
4493 		    tp1->mbcnt);
4494 	}
4495 	if (asoc->total_output_queue_size >= tp1->book_size) {
4496 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4497 	} else {
4498 		asoc->total_output_queue_size = 0;
4499 	}
4500 
4501 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4502 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4503 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4504 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4505 		} else {
4506 			stcb->sctp_socket->so_snd.sb_cc = 0;
4507 
4508 		}
4509 	}
4510 }
4511 
4512 #endif
4513 
4514 int
4515 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4516     int reason, int so_locked
4517 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4518     SCTP_UNUSED
4519 #endif
4520 )
4521 {
4522 	struct sctp_stream_out *strq;
4523 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4524 	struct sctp_stream_queue_pending *sp;
4525 	uint16_t stream = 0, seq = 0;
4526 	uint8_t foundeom = 0;
4527 	int ret_sz = 0;
4528 	int notdone;
4529 	int do_wakeup_routine = 0;
4530 
4531 	stream = tp1->rec.data.stream_number;
4532 	seq = tp1->rec.data.stream_seq;
4533 	do {
4534 		ret_sz += tp1->book_size;
4535 		if (tp1->data != NULL) {
4536 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4537 				sctp_flight_size_decrease(tp1);
4538 				sctp_total_flight_decrease(stcb, tp1);
4539 			}
4540 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4541 			stcb->asoc.peers_rwnd += tp1->send_size;
4542 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4543 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4544 			if (tp1->data) {
4545 				sctp_m_freem(tp1->data);
4546 				tp1->data = NULL;
4547 			}
4548 			do_wakeup_routine = 1;
4549 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4550 				stcb->asoc.sent_queue_cnt_removeable--;
4551 			}
4552 		}
4553 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4554 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4555 		    SCTP_DATA_NOT_FRAG) {
4556 			/* not frag'ed we ae done   */
4557 			notdone = 0;
4558 			foundeom = 1;
4559 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4560 			/* end of frag, we are done */
4561 			notdone = 0;
4562 			foundeom = 1;
4563 		} else {
4564 			/*
4565 			 * Its a begin or middle piece, we must mark all of
4566 			 * it
4567 			 */
4568 			notdone = 1;
4569 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4570 		}
4571 	} while (tp1 && notdone);
4572 	if (foundeom == 0) {
4573 		/*
4574 		 * The multi-part message was scattered across the send and
4575 		 * sent queue.
4576 		 */
4577 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4578 			if ((tp1->rec.data.stream_number != stream) ||
4579 			    (tp1->rec.data.stream_seq != seq)) {
4580 				break;
4581 			}
4582 			/*
4583 			 * save to chk in case we have some on stream out
4584 			 * queue. If so and we have an un-transmitted one we
4585 			 * don't have to fudge the TSN.
4586 			 */
4587 			chk = tp1;
4588 			ret_sz += tp1->book_size;
4589 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4590 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4591 			if (tp1->data) {
4592 				sctp_m_freem(tp1->data);
4593 				tp1->data = NULL;
4594 			}
4595 			/* No flight involved here book the size to 0 */
4596 			tp1->book_size = 0;
4597 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4598 				foundeom = 1;
4599 			}
4600 			do_wakeup_routine = 1;
4601 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4602 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4603 			/*
4604 			 * on to the sent queue so we can wait for it to be
4605 			 * passed by.
4606 			 */
4607 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4608 			    sctp_next);
4609 			stcb->asoc.send_queue_cnt--;
4610 			stcb->asoc.sent_queue_cnt++;
4611 		}
4612 	}
4613 	if (foundeom == 0) {
4614 		/*
4615 		 * Still no eom found. That means there is stuff left on the
4616 		 * stream out queue.. yuck.
4617 		 */
4618 		strq = &stcb->asoc.strmout[stream];
4619 		SCTP_TCB_SEND_LOCK(stcb);
4620 		TAILQ_FOREACH(sp, &strq->outqueue, next) {
4621 			/* FIXME: Shouldn't this be a serial number check? */
4622 			if (sp->strseq > seq) {
4623 				break;
4624 			}
4625 			/* Check if its our SEQ */
4626 			if (sp->strseq == seq) {
4627 				sp->discard_rest = 1;
4628 				/*
4629 				 * We may need to put a chunk on the queue
4630 				 * that holds the TSN that would have been
4631 				 * sent with the LAST bit.
4632 				 */
4633 				if (chk == NULL) {
4634 					/* Yep, we have to */
4635 					sctp_alloc_a_chunk(stcb, chk);
4636 					if (chk == NULL) {
4637 						/*
4638 						 * we are hosed. All we can
4639 						 * do is nothing.. which
4640 						 * will cause an abort if
4641 						 * the peer is paying
4642 						 * attention.
4643 						 */
4644 						goto oh_well;
4645 					}
4646 					memset(chk, 0, sizeof(*chk));
4647 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4648 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4649 					chk->asoc = &stcb->asoc;
4650 					chk->rec.data.stream_seq = sp->strseq;
4651 					chk->rec.data.stream_number = sp->stream;
4652 					chk->rec.data.payloadtype = sp->ppid;
4653 					chk->rec.data.context = sp->context;
4654 					chk->flags = sp->act_flags;
4655 					if (sp->net)
4656 						chk->whoTo = sp->net;
4657 					else
4658 						chk->whoTo = stcb->asoc.primary_destination;
4659 					atomic_add_int(&chk->whoTo->ref_count, 1);
4660 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4661 					stcb->asoc.pr_sctp_cnt++;
4662 					chk->pr_sctp_on = 1;
4663 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4664 					stcb->asoc.sent_queue_cnt++;
4665 					stcb->asoc.pr_sctp_cnt++;
4666 				} else {
4667 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4668 				}
4669 		oh_well:
4670 				if (sp->data) {
4671 					/*
4672 					 * Pull any data to free up the SB
4673 					 * and allow sender to "add more"
4674 					 * whilc we will throw away :-)
4675 					 */
4676 					sctp_free_spbufspace(stcb, &stcb->asoc,
4677 					    sp);
4678 					ret_sz += sp->length;
4679 					do_wakeup_routine = 1;
4680 					sp->some_taken = 1;
4681 					sctp_m_freem(sp->data);
4682 					sp->length = 0;
4683 					sp->data = NULL;
4684 					sp->tail_mbuf = NULL;
4685 				}
4686 				break;
4687 			}
4688 		}		/* End tailq_foreach */
4689 		SCTP_TCB_SEND_UNLOCK(stcb);
4690 	}
4691 	if (do_wakeup_routine) {
4692 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4693 		struct socket *so;
4694 
4695 		so = SCTP_INP_SO(stcb->sctp_ep);
4696 		if (!so_locked) {
4697 			atomic_add_int(&stcb->asoc.refcnt, 1);
4698 			SCTP_TCB_UNLOCK(stcb);
4699 			SCTP_SOCKET_LOCK(so, 1);
4700 			SCTP_TCB_LOCK(stcb);
4701 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4702 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4703 				/* assoc was freed while we were unlocked */
4704 				SCTP_SOCKET_UNLOCK(so, 1);
4705 				return (ret_sz);
4706 			}
4707 		}
4708 #endif
4709 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4710 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4711 		if (!so_locked) {
4712 			SCTP_SOCKET_UNLOCK(so, 1);
4713 		}
4714 #endif
4715 	}
4716 	return (ret_sz);
4717 }
4718 
4719 /*
4720  * checks to see if the given address, sa, is one that is currently known by
4721  * the kernel note: can't distinguish the same address on multiple interfaces
4722  * and doesn't handle multiple addresses with different zone/scope id's note:
4723  * ifa_ifwithaddr() compares the entire sockaddr struct
4724  */
4725 struct sctp_ifa *
4726 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4727     int holds_lock)
4728 {
4729 	struct sctp_laddr *laddr;
4730 
4731 	if (holds_lock == 0) {
4732 		SCTP_INP_RLOCK(inp);
4733 	}
4734 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4735 		if (laddr->ifa == NULL)
4736 			continue;
4737 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4738 			continue;
4739 #ifdef INET
4740 		if (addr->sa_family == AF_INET) {
4741 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4742 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4743 				/* found him. */
4744 				if (holds_lock == 0) {
4745 					SCTP_INP_RUNLOCK(inp);
4746 				}
4747 				return (laddr->ifa);
4748 				break;
4749 			}
4750 		}
4751 #endif
4752 #ifdef INET6
4753 		if (addr->sa_family == AF_INET6) {
4754 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4755 			    &laddr->ifa->address.sin6)) {
4756 				/* found him. */
4757 				if (holds_lock == 0) {
4758 					SCTP_INP_RUNLOCK(inp);
4759 				}
4760 				return (laddr->ifa);
4761 				break;
4762 			}
4763 		}
4764 #endif
4765 	}
4766 	if (holds_lock == 0) {
4767 		SCTP_INP_RUNLOCK(inp);
4768 	}
4769 	return (NULL);
4770 }
4771 
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	/*
	 * Compute the hash value used to select a vrf_addr_hash bucket for
	 * addr.  Returns 0 for address families that are not hashed.
	 */
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)addr;
			/* fold the upper half of the address into the lower */
			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
		}
#endif
#ifdef INET6
		/*
		 * Fix: the label must be the address family AF_INET6;
		 * "INET6" is only the kernel build-option macro, so IPv6
		 * addresses were never matched here.
		 */
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t hash_of_addr;

			sin6 = (struct sockaddr_in6 *)addr;
			/* sum the four 32-bit words, then fold the halves */
			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3]);
			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
			return (hash_of_addr);
		}
#endif
	default:
		break;
	}
	return (0);
}
4805 
4806 struct sctp_ifa *
4807 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4808 {
4809 	struct sctp_ifa *sctp_ifap;
4810 	struct sctp_vrf *vrf;
4811 	struct sctp_ifalist *hash_head;
4812 	uint32_t hash_of_addr;
4813 
4814 	if (holds_lock == 0)
4815 		SCTP_IPI_ADDR_RLOCK();
4816 
4817 	vrf = sctp_find_vrf(vrf_id);
4818 	if (vrf == NULL) {
4819 stage_right:
4820 		if (holds_lock == 0)
4821 			SCTP_IPI_ADDR_RUNLOCK();
4822 		return (NULL);
4823 	}
4824 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4825 
4826 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4827 	if (hash_head == NULL) {
4828 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4829 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4830 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4831 		sctp_print_address(addr);
4832 		SCTP_PRINTF("No such bucket for address\n");
4833 		if (holds_lock == 0)
4834 			SCTP_IPI_ADDR_RUNLOCK();
4835 
4836 		return (NULL);
4837 	}
4838 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4839 		if (sctp_ifap == NULL) {
4840 #ifdef INVARIANTS
4841 			panic("Huh LIST_FOREACH corrupt");
4842 			goto stage_right;
4843 #else
4844 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4845 			goto stage_right;
4846 #endif
4847 		}
4848 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4849 			continue;
4850 #ifdef INET
4851 		if (addr->sa_family == AF_INET) {
4852 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4853 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4854 				/* found him. */
4855 				if (holds_lock == 0)
4856 					SCTP_IPI_ADDR_RUNLOCK();
4857 				return (sctp_ifap);
4858 				break;
4859 			}
4860 		}
4861 #endif
4862 #ifdef INET6
4863 		if (addr->sa_family == AF_INET6) {
4864 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4865 			    &sctp_ifap->address.sin6)) {
4866 				/* found him. */
4867 				if (holds_lock == 0)
4868 					SCTP_IPI_ADDR_RUNLOCK();
4869 				return (sctp_ifap);
4870 				break;
4871 			}
4872 		}
4873 #endif
4874 	}
4875 	if (holds_lock == 0)
4876 		SCTP_IPI_ADDR_RUNLOCK();
4877 	return (NULL);
4878 }
4879 
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* hold a reference so the association cannot vanish under us */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* window grew enough to be worth advertising via a SACK */
		if (hold_rlock) {
			/* release the caller's read lock before the TCB lock */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	if (so && r_unlocked && hold_rlock) {
		/* restore the read lock we released above */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
4961 
/*
 * sctp_sorecvmsg:
 *
 * Receive data and/or a notification from an SCTP socket.  Data is
 * copied out to userland via uio, or, when mp != NULL, the raw mbuf
 * chain is handed back to the caller instead.  If from/fromlen are
 * supplied, the peer's address is returned; if sinfo is supplied and
 * filling_sinfo is set, per-message receive information is filled in.
 * *msg_flags is read on entry (MSG_PEEK, MSG_DONTWAIT, ...) and on
 * return may carry MSG_EOR, MSG_NOTIFICATION and/or MSG_TRUNC.
 *
 * Returns 0 on success or an errno value.  Three locks are juggled
 * throughout: the socket-buffer mutex, the inp read-queue lock and
 * the sblock() sleep lock; the flags hold_sblock, hold_rlock and
 * sockbuf_lock track which of them are currently owned so that every
 * exit path releases exactly what is still held.
 */
int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
	 * On the way out we may send out any combination of:
	 * MSG_NOTIFICATION MSG_EOR
	 *
	 */
	struct sctp_inpcb *inp = NULL;
	int my_len = 0;
	int cp_len = 0, error = 0;
	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
	struct mbuf *m = NULL;
	struct sctp_tcb *stcb = NULL;
	int wakeup_read_socket = 0;
	int freecnt_applied = 0;	/* non-zero iff we hold a refcnt on stcb */
	int out_flags = 0, in_flags = 0;
	int block_allowed = 1;
	uint32_t freed_so_far = 0;
	uint32_t copied_so_far = 0;
	int in_eeor_mode = 0;
	int no_rcv_needed = 0;
	uint32_t rwnd_req = 0;
	int hold_sblock = 0;	/* non-zero iff SOCKBUF_LOCK is held */
	int hold_rlock = 0;	/* non-zero iff SCTP_INP_READ_LOCK is held */
	int slen = 0;
	uint32_t held_length = 0;
	int sockbuf_lock = 0;	/* non-zero iff sblock() sleep lock is held */

	if (uio == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if (msg_flags) {
		in_flags = *msg_flags;
		if (in_flags & MSG_PEEK)
			SCTP_STAT_INCR(sctps_read_peeks);
	} else {
		in_flags = 0;
	}
	slen = uio->uio_resid;

	/* Pull in and set up our int flags */
	if (in_flags & MSG_OOB) {
		/* Out of band's NOT supported */
		return (EOPNOTSUPP);
	}
	/* MSG_PEEK is only meaningful for the uio copy path (mp == NULL). */
	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((in_flags & (MSG_DONTWAIT
	    | MSG_NBIO
	    )) ||
	    SCTP_SO_IS_NBIO(so)) {
		block_allowed = 0;
	}
	/* setup the endpoint */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
		return (EFAULT);
	}
	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
	/* Must be at least a MTU's worth */
	if (rwnd_req < SCTP_MIN_RWND)
		rwnd_req = SCTP_MIN_RWND;
	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTER,
		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTERPL,
		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
	}
	/* Serialize readers on this socket buffer. */
	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
	sockbuf_lock = 1;
	if (error) {
		goto release_unlocked;
	}
	/*
	 * restart: come back here whenever the read queue must be
	 * re-examined from scratch (e.g. a stale control was reaped).
	 */
restart:


restart_nosblocks:
	if (hold_sblock == 0) {
		SOCKBUF_LOCK(&so->so_rcv);
		hold_sblock = 1;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto out;
		} else {
			if (so->so_rcv.sb_cc == 0) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
				/* indicate EOF */
				error = 0;
				goto out;
			}
		}
	}
	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
		/* we need to wait for data */
		if ((so->so_rcv.sb_cc == 0) &&
		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
				/*
				 * For active open side clear flags for
				 * re-use passive open is blocked by
				 * connect.
				 */
				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
					/*
					 * You were aborted, passive side
					 * always hits here
					 */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
					error = ECONNRESET;
					/*
					 * You get this once if you are
					 * active open side
					 */
					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
						/*
						 * Remove flag if on the
						 * active open side
						 */
						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
					}
				}
				so->so_state &= ~(SS_ISCONNECTING |
				    SS_ISDISCONNECTING |
				    SS_ISCONFIRMING |
				    SS_ISCONNECTED);
				if (error == 0) {
					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
						error = ENOTCONN;
					} else {
						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
					}
				}
				goto out;
			}
		}
		error = sbwait(&so->so_rcv);
		if (error) {
			goto out;
		}
		held_length = 0;
		goto restart_nosblocks;
	} else if (so->so_rcv.sb_cc == 0) {
		/* Non-blocking and nothing queued: report why. */
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
		} else {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
					/*
					 * For active open side clear flags
					 * for re-use passive open is
					 * blocked by connect.
					 */
					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
						/*
						 * You were aborted, passive
						 * side always hits here
						 */
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
						error = ECONNRESET;
						/*
						 * You get this once if you
						 * are active open side
						 */
						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
							/*
							 * Remove flag if on
							 * the active open
							 * side
							 */
							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
						}
					}
					so->so_state &= ~(SS_ISCONNECTING |
					    SS_ISDISCONNECTING |
					    SS_ISCONFIRMING |
					    SS_ISCONNECTED);
					if (error == 0) {
						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
							error = ENOTCONN;
						} else {
							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
						}
					}
					goto out;
				}
			}
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
			error = EWOULDBLOCK;
		}
		goto out;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	/* we possibly have data we can read */
	/* sa_ignore FREED_MEMORY */
	control = TAILQ_FIRST(&inp->read_queue);
	if (control == NULL) {
		/*
		 * This could be happening since the appender did the
		 * increment but as not yet did the tailq insert onto the
		 * read_queue
		 */
		if (hold_rlock == 0) {
			SCTP_INP_READ_LOCK(inp);
			hold_rlock = 1;
		}
		control = TAILQ_FIRST(&inp->read_queue);
		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
#ifdef INVARIANTS
			panic("Huh, its non zero and nothing on control?");
#endif
			so->so_rcv.sb_cc = 0;
		}
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
		goto restart;
	}
	if ((control->length == 0) &&
	    (control->do_not_ref_stcb)) {
		/*
		 * Clean up code for freeing assoc that left behind a
		 * pdapi.. maybe a peer in EEOR that just closed after
		 * sending and never indicated a EOR.
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		control->held_length = 0;
		if (control->data) {
			/* Hmm there is data here .. fix */
			struct mbuf *m_tmp;
			int cnt = 0;

			m_tmp = control->data;
			while (m_tmp) {
				cnt += SCTP_BUF_LEN(m_tmp);
				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
					control->tail_mbuf = m_tmp;
					control->end_added = 1;
				}
				m_tmp = SCTP_BUF_NEXT(m_tmp);
			}
			control->length = cnt;
		} else {
			/* remove it */
			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hiddend data */
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if ((control->length == 0) &&
	    (control->end_added == 1)) {
		/*
		 * Do we also need to check for (control->pdapi_aborted ==
		 * 1)?
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		TAILQ_REMOVE(&inp->read_queue, control, next);
		if (control->data) {
#ifdef INVARIANTS
			panic("control->data not null but control->length == 0");
#else
			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
			sctp_m_freem(control->data);
			control->data = NULL;
#endif
		}
		if (control->aux_data) {
			sctp_m_free(control->aux_data);
			control->aux_data = NULL;
		}
		sctp_free_remote_addr(control->whoFrom);
		sctp_free_a_readq(stcb, control);
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if (control->length == 0) {
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
		    (filling_sinfo)) {
			/* find a more suitable one then this */
			ctl = TAILQ_NEXT(control, next);
			while (ctl) {
				if ((ctl->stcb != control->stcb) && (ctl->length) &&
				    (ctl->some_taken ||
				    (ctl->spec_flags & M_NOTIFICATION) ||
				    ((ctl->do_not_ref_stcb == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have a different TCB next, and there is data
					 * present. If we have already taken some (pdapi), OR we can
					 * ref the tcb and no delivery as started on this stream, we
					 * take it. Note we allow a notification on a different
					 * assoc to be delivered..
					 */
					control = ctl;
					goto found_one;
				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
					    (ctl->length) &&
					    ((ctl->some_taken) ||
					    ((ctl->do_not_ref_stcb == 0) &&
					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
					/*-
					 * If we have the same tcb, and there is data present, and we
					 * have the strm interleave feature present. Then if we have
					 * taken some (pdapi) or we can refer to tht tcb AND we have
					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notificaiton on the same assoc to
					 * be delivered.
					 */
					control = ctl;
					goto found_one;
				}
				ctl = TAILQ_NEXT(ctl, next);
			}
		}
		/*
		 * if we reach here, not suitable replacement is available
		 * <or> fragment interleave is NOT on. So stuff the sb_cc
		 * into the our held count, and its time to sleep again.
		 */
		held_length = so->so_rcv.sb_cc;
		control->held_length = so->so_rcv.sb_cc;
		goto restart;
	}
	/* Clear the held length since there is something to read */
	control->held_length = 0;
	if (hold_rlock) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	/*
	 * found_one: a control with readable data (or a notification) has
	 * been selected; from here on we commit to delivering from it.
	 */
found_one:
	/*
	 * If we reach here, control has a some data for us to read off.
	 * Note that stcb COULD be NULL.
	 */
	control->some_taken++;
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	stcb = control->stcb;
	if (stcb) {
		if ((control->do_not_ref_stcb == 0) &&
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			if (freecnt_applied == 0)
				stcb = NULL;
		} else if (control->do_not_ref_stcb == 0) {
			/* you can't free it on me please */
			/*
			 * The lock on the socket buffer protects us so the
			 * free code will stop. But since we used the
			 * socketbuf lock and the sender uses the tcb_lock
			 * to increment, we need to use the atomic add to
			 * the refcnt
			 */
			if (freecnt_applied) {
#ifdef INVARIANTS
				panic("refcnt already incremented");
#else
				printf("refcnt already incremented?\n");
#endif
			} else {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				freecnt_applied = 1;
			}
			/*
			 * Setup to remember how much we have not yet told
			 * the peer our rwnd has opened up. Note we grab the
			 * value from the tcb from last time. Note too that
			 * sack sending clears this when a sack is sent,
			 * which is fine. Once we hit the rwnd_req, we then
			 * will go to the sctp_user_rcvd() that will not
			 * lock until it KNOWs it MUST send a WUP-SACK.
			 */
			freed_so_far = stcb->freed_by_sorcv_sincelast;
			stcb->freed_by_sorcv_sincelast = 0;
		}
	}
	if (stcb &&
	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
	    control->do_not_ref_stcb == 0) {
		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
	}
	/* First lets get off the sinfo and sockaddr info */
	if ((sinfo) && filling_sinfo) {
		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
		nxt = TAILQ_NEXT(control, next);
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
			struct sctp_extrcvinfo *s_extra;

			s_extra = (struct sctp_extrcvinfo *)sinfo;
			if ((nxt) &&
			    (nxt->length)) {
				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
				if (nxt->sinfo_flags & SCTP_UNORDERED) {
					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
				}
				if (nxt->spec_flags & M_NOTIFICATION) {
					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
				}
				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
				s_extra->sreinfo_next_length = nxt->length;
				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
				if (nxt->tail_mbuf != NULL) {
					if (nxt->end_added) {
						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
					}
				}
			} else {
				/*
				 * we explicitly 0 this, since the memcpy
				 * got some other things beyond the older
				 * sinfo_ that is on the control's structure
				 * :-D
				 */
				nxt = NULL;
				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
				s_extra->sreinfo_next_aid = 0;
				s_extra->sreinfo_next_length = 0;
				s_extra->sreinfo_next_ppid = 0;
				s_extra->sreinfo_next_stream = 0;
			}
		}
		/*
		 * update off the real current cum-ack, if we have an stcb.
		 */
		if ((control->do_not_ref_stcb == 0) && stcb)
			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
		/*
		 * mask off the high bits, we keep the actual chunk bits in
		 * there.
		 */
		sinfo->sinfo_flags &= 0x00ff;
		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
			sinfo->sinfo_flags |= SCTP_UNORDERED;
		}
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	{
		int index, newindex;
		struct sctp_pcbtsn_rlog *entry;

		do {
			index = inp->readlog_index;
			newindex = index + 1;
			if (newindex >= SCTP_READ_LOG_SIZE) {
				newindex = 0;
			}
		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
		entry = &inp->readlog[index];
		entry->vtag = control->sinfo_assoc_id;
		entry->strm = control->sinfo_stream;
		entry->seq = control->sinfo_ssn;
		entry->sz = control->length;
		entry->flgs = control->sinfo_flags;
	}
#endif
	/* Copy out the peer address, possibly mapping v4 to v6. */
	if (fromlen && from) {
		struct sockaddr *to;

#ifdef INET
		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
		((struct sockaddr_in *)from)->sin_port = control->port_from;
#else
		/* No AF_INET use AF_INET6 */
		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
#endif

		to = from;
#if defined(INET) && defined(INET6)
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
		    (to->sa_family == AF_INET) &&
		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
			struct sockaddr_in *sin;
			struct sockaddr_in6 sin6;

			sin = (struct sockaddr_in *)to;
			bzero(&sin6, sizeof(sin6));
			sin6.sin6_family = AF_INET6;
			sin6.sin6_len = sizeof(struct sockaddr_in6);
			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
			bcopy(&sin->sin_addr,
			    &sin6.sin6_addr.s6_addr32[3],
			    sizeof(sin6.sin6_addr.s6_addr32[3]));
			sin6.sin6_port = sin->sin_port;
			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
		}
#endif
#if defined(INET6)
		{
			struct sockaddr_in6 lsa6, *to6;

			to6 = (struct sockaddr_in6 *)to;
			sctp_recover_scope_mac(to6, (&lsa6));
		}
#endif
	}
	/* now copy out what data we can */
	if (mp == NULL) {
		/* copy out each mbuf in the chain up to length */
get_more_data:
		m = control->data;
		while (m) {
			/* Move out all we can */
			cp_len = (int)uio->uio_resid;
			my_len = (int)SCTP_BUF_LEN(m);
			if (cp_len > my_len) {
				/* not enough in this buf */
				cp_len = my_len;
			}
			if (hold_rlock) {
				SCTP_INP_READ_UNLOCK(inp);
				hold_rlock = 0;
			}
			/* uiomove() may sleep; no locks may be held here. */
			if (cp_len > 0)
				error = uiomove(mtod(m, char *), cp_len, uio);
			/* re-read */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				goto release;
			}
			if ((control->do_not_ref_stcb == 0) && stcb &&
			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				no_rcv_needed = 1;
			}
			if (error) {
				/* error we are out of here */
				goto release;
			}
			if ((SCTP_BUF_NEXT(m) == NULL) &&
			    (cp_len >= SCTP_BUF_LEN(m)) &&
			    ((control->end_added == 0) ||
			    (control->end_added &&
			    (TAILQ_NEXT(control, next) == NULL)))
			    ) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
			if (cp_len == SCTP_BUF_LEN(m)) {
				if ((SCTP_BUF_NEXT(m) == NULL) &&
				    (control->end_added)) {
					out_flags |= MSG_EOR;
					if ((control->do_not_ref_stcb == 0) &&
					    (control->stcb != NULL) &&
					    ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				/* we ate up the mbuf */
				if (in_flags & MSG_PEEK) {
					/* just looking */
					m = SCTP_BUF_NEXT(m);
					copied_so_far += cp_len;
				} else {
					/* dispose of the mbuf */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
					}
					sctp_sbfree(control, stcb, &so->so_rcv, m);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
					}
					copied_so_far += cp_len;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					atomic_subtract_int(&control->length, cp_len);
					control->data = sctp_m_free(m);
					m = control->data;
					/*
					 * been through it all, must hold sb
					 * lock ok to null tail
					 */
					if (control->data == NULL) {
#ifdef INVARIANTS
						if ((control->end_added == 0) ||
						    (TAILQ_NEXT(control, next) == NULL)) {
							/*
							 * If the end is not
							 * added, OR the
							 * next is NOT null
							 * we MUST have the
							 * lock.
							 */
							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
								panic("Hmm we don't own the lock?");
							}
						}
#endif
						control->tail_mbuf = NULL;
#ifdef INVARIANTS
						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
							panic("end_added, nothing left and no MSG_EOR");
						}
#endif
					}
				}
			} else {
				/* Do we need to trim the mbuf? */
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				if ((in_flags & MSG_PEEK) == 0) {
					SCTP_BUF_RESV_UF(m, cp_len);
					SCTP_BUF_LEN(m) -= cp_len;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
					}
					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
					if ((control->do_not_ref_stcb == 0) &&
					    stcb) {
						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
					}
					copied_so_far += cp_len;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
						    SCTP_LOG_SBRESULT, 0);
					}
					atomic_subtract_int(&control->length, cp_len);
				} else {
					copied_so_far += cp_len;
				}
			}
			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
				break;
			}
			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
			    (control->do_not_ref_stcb == 0) &&
			    (freed_so_far >= rwnd_req)) {
				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
			}
		}		/* end while(m) */
		/*
		 * At this point we have looked at it all and we either have
		 * a MSG_EOR/or read all the user wants... <OR>
		 * control->length == 0.
		 */
		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
			/* we are done with this control */
			if (control->length == 0) {
				if (control->data) {
#ifdef INVARIANTS
					panic("control->data not null at read eor?");
#else
					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
					sctp_m_freem(control->data);
					control->data = NULL;
#endif
				}
		done_with_control:
				if (TAILQ_NEXT(control, next) == NULL) {
					/*
					 * If we don't have a next we need a
					 * lock, if there is a next
					 * interrupt is filling ahead of us
					 * and we don't need a lock to
					 * remove this guy (which is the
					 * head of the queue).
					 */
					if (hold_rlock == 0) {
						SCTP_INP_READ_LOCK(inp);
						hold_rlock = 1;
					}
				}
				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hiddend data */
				if (control->held_length) {
					held_length = 0;
					control->held_length = 0;
					wakeup_read_socket = 1;
				}
				if (control->aux_data) {
					sctp_m_free(control->aux_data);
					control->aux_data = NULL;
				}
				no_rcv_needed = control->do_not_ref_stcb;
				sctp_free_remote_addr(control->whoFrom);
				control->data = NULL;
				sctp_free_a_readq(stcb, control);
				control = NULL;
				if ((freed_so_far >= rwnd_req) &&
				    (no_rcv_needed == 0))
					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);

			} else {
				/*
				 * The user did not read all of this
				 * message, turn off the returned MSG_EOR
				 * since we are leaving more behind on the
				 * control to read.
				 */
#ifdef INVARIANTS
				if (control->end_added &&
				    (control->data == NULL) &&
				    (control->tail_mbuf == NULL)) {
					panic("Gak, control->length is corrupt?");
				}
#endif
				no_rcv_needed = control->do_not_ref_stcb;
				out_flags &= ~MSG_EOR;
			}
		}
		if (out_flags & MSG_EOR) {
			goto release;
		}
		if ((uio->uio_resid == 0) ||
		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
		    ) {
			goto release;
		}
		/*
		 * If I hit here the receiver wants more and this message is
		 * NOT done (pd-api). So two questions. Can we block? if not
		 * we are done. Did the user NOT set MSG_WAITALL?
		 */
		if (block_allowed == 0) {
			goto release;
		}
		/*
		 * We need to wait for more data a few things: - We don't
		 * sbunlock() so we don't get someone else reading. - We
		 * must be sure to account for the case where what is added
		 * is NOT to our control when we wakeup.
		 */

		/*
		 * Do we need to tell the transport a rwnd update might be
		 * needed before we go to sleep?
		 */
		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
		    ((freed_so_far >= rwnd_req) &&
		    (control->do_not_ref_stcb == 0) &&
		    (no_rcv_needed == 0))) {
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
		}
		/*
		 * wait_some_more: sleep until more of this partially
		 * delivered message arrives (or the socket goes away).
		 */
wait_some_more:
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			goto release;
		}
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
			goto release;

		if (hold_rlock == 1) {
			SCTP_INP_READ_UNLOCK(inp);
			hold_rlock = 0;
		}
		if (hold_sblock == 0) {
			SOCKBUF_LOCK(&so->so_rcv);
			hold_sblock = 1;
		}
		if ((copied_so_far) && (control->length == 0) &&
		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
			goto release;
		}
		if (so->so_rcv.sb_cc <= control->held_length) {
			error = sbwait(&so->so_rcv);
			if (error) {
				goto release;
			}
			control->held_length = 0;
		}
		if (hold_sblock) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			hold_sblock = 0;
		}
		if (control->length == 0) {
			/* still nothing here */
			if (control->end_added == 1) {
				/* he aborted, or is done i.e.did a shutdown */
				out_flags |= MSG_EOR;
				if (control->pdapi_aborted) {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;

					out_flags |= MSG_TRUNC;
				} else {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				goto done_with_control;
			}
			if (so->so_rcv.sb_cc > held_length) {
				control->held_length = so->so_rcv.sb_cc;
				held_length = 0;
			}
			goto wait_some_more;
		} else if (control->data == NULL) {
			/*
			 * we must re-sync since data is probably being
			 * added
			 */
			SCTP_INP_READ_LOCK(inp);
			if ((control->length > 0) && (control->data == NULL)) {
				/*
				 * big trouble.. we have the lock and its
				 * corrupt?
				 */
#ifdef INVARIANTS
				panic("Impossible data==NULL length !=0");
#endif
				out_flags |= MSG_EOR;
				out_flags |= MSG_TRUNC;
				control->length = 0;
				SCTP_INP_READ_UNLOCK(inp);
				goto done_with_control;
			}
			SCTP_INP_READ_UNLOCK(inp);
			/* We will fall around to get more data */
		}
		goto get_more_data;
	} else {
		/*-
		 * Give caller back the mbuf chain,
		 * store in uio_resid the length
		 */
		wakeup_read_socket = 0;
		if ((control->end_added == 0) ||
		    (TAILQ_NEXT(control, next) == NULL)) {
			/* Need to get rlock */
			if (hold_rlock == 0) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
		}
		if (control->end_added) {
			out_flags |= MSG_EOR;
			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
		}
		if (control->spec_flags & M_NOTIFICATION) {
			out_flags |= MSG_NOTIFICATION;
		}
		uio->uio_resid = control->length;
		*mp = control->data;
		m = control->data;
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
			}
			sctp_sbfree(control, stcb, &so->so_rcv, m);
			freed_so_far += SCTP_BUF_LEN(m);
			freed_so_far += MSIZE;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control->data = control->tail_mbuf = NULL;
		control->length = 0;
		if (out_flags & MSG_EOR) {
			/* Done with this control */
			goto done_with_control;
		}
	}
	/*
	 * release: normal exit path -- drop whatever locks are still
	 * held, then give up the sblock so other readers may proceed.
	 */
release:
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	sbunlock(&so->so_rcv);
	sockbuf_lock = 0;

release_unlocked:
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
		if ((freed_so_far >= rwnd_req) &&
		    (control && (control->do_not_ref_stcb == 0)) &&
		    (no_rcv_needed == 0))
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
	}
	/*
	 * out: common exit -- publish out_flags, release any remaining
	 * locks and the stcb refcount taken at found_one.
	 */
out:
	if (msg_flags) {
		*msg_flags = out_flags;
	}
	if (((out_flags & MSG_EOR) == 0) &&
	    ((in_flags & MSG_PEEK) == 0) &&
	    (sinfo) &&
	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
		struct sctp_extrcvinfo *s_extra;

		s_extra = (struct sctp_extrcvinfo *)sinfo;
		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
	}
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if (sockbuf_lock) {
		sbunlock(&so->so_rcv);
	}
	if (freecnt_applied) {
		/*
		 * The lock on the socket buffer protects us so the free
		 * code will stop. But since we used the socketbuf lock and
		 * the sender uses the tcb_lock to increment, we need to use
		 * the atomic add to the refcnt.
		 */
		if (stcb == NULL) {
#ifdef INVARIANTS
			panic("stcb for refcnt has gone NULL?");
			goto stage_left;
#else
			goto stage_left;
#endif
		}
		atomic_add_int(&stcb->asoc.refcnt, -1);
		freecnt_applied = 0;
		/* Save the value back for next time */
		stcb->freed_by_sorcv_sincelast = freed_so_far;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		if (stcb) {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    stcb->asoc.my_rwnd,
			    so->so_rcv.sb_cc);
		} else {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    0,
			    so->so_rcv.sb_cc);
		}
	}
stage_left:
	if (wakeup_read_socket) {
		sctp_sorwakeup(inp, so);
	}
	return (error);
}
5965 
5966 
5967 #ifdef SCTP_MBUF_LOGGING
5968 struct mbuf *
5969 sctp_m_free(struct mbuf *m)
5970 {
5971 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5972 		if (SCTP_BUF_IS_EXTENDED(m)) {
5973 			sctp_log_mb(m, SCTP_MBUF_IFREE);
5974 		}
5975 	}
5976 	return (m_free(m));
5977 }
5978 
5979 void
5980 sctp_m_freem(struct mbuf *mb)
5981 {
5982 	while (mb != NULL)
5983 		mb = sctp_m_free(mb);
5984 }
5985 
5986 #endif
5987 
/*
 * Request a peer-set-primary for every association that uses the given
 * local address: resolve the address to its sctp_ifa in the given VRF,
 * queue a SCTP_SET_PRIM_ADDR work item on the global address work
 * queue, and start the ADDR_WQ timer whose handler drains the queue.
 *
 * Returns 0 on success, EADDRNOTAVAIL if the address is not a local
 * address, or ENOMEM if no work item could be allocated.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and init the wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* The queued work item holds a reference on the ifa. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_WQ_ADDR_UNLOCK();
	/* Kick the timer that services the address work queue. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}
6034 
6035 
6036 int
6037 sctp_soreceive(struct socket *so,
6038     struct sockaddr **psa,
6039     struct uio *uio,
6040     struct mbuf **mp0,
6041     struct mbuf **controlp,
6042     int *flagsp)
6043 {
6044 	int error, fromlen;
6045 	uint8_t sockbuf[256];
6046 	struct sockaddr *from;
6047 	struct sctp_extrcvinfo sinfo;
6048 	int filling_sinfo = 1;
6049 	struct sctp_inpcb *inp;
6050 
6051 	inp = (struct sctp_inpcb *)so->so_pcb;
6052 	/* pickup the assoc we are reading from */
6053 	if (inp == NULL) {
6054 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6055 		return (EINVAL);
6056 	}
6057 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6058 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6059 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6060 	    (controlp == NULL)) {
6061 		/* user does not want the sndrcv ctl */
6062 		filling_sinfo = 0;
6063 	}
6064 	if (psa) {
6065 		from = (struct sockaddr *)sockbuf;
6066 		fromlen = sizeof(sockbuf);
6067 		from->sa_len = 0;
6068 	} else {
6069 		from = NULL;
6070 		fromlen = 0;
6071 	}
6072 
6073 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6074 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6075 	if ((controlp) && (filling_sinfo)) {
6076 		/* copy back the sinfo in a CMSG format */
6077 		if (filling_sinfo)
6078 			*controlp = sctp_build_ctl_nchunk(inp,
6079 			    (struct sctp_sndrcvinfo *)&sinfo);
6080 		else
6081 			*controlp = NULL;
6082 	}
6083 	if (psa) {
6084 		/* copy back the address info */
6085 		if (from && from->sa_len) {
6086 			*psa = sodupsockaddr(from, M_NOWAIT);
6087 		} else {
6088 			*psa = NULL;
6089 		}
6090 	}
6091 	return (error);
6092 }
6093 
6094 
6095 
6096 
6097 
6098 int
6099 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6100     int totaddr, int *error)
6101 {
6102 	int added = 0;
6103 	int i;
6104 	struct sctp_inpcb *inp;
6105 	struct sockaddr *sa;
6106 	size_t incr = 0;
6107 
6108 	sa = addr;
6109 	inp = stcb->sctp_ep;
6110 	*error = 0;
6111 	for (i = 0; i < totaddr; i++) {
6112 		switch (sa->sa_family) {
6113 #ifdef INET
6114 		case AF_INET:
6115 			incr = sizeof(struct sockaddr_in);
6116 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6117 				/* assoc gone no un-lock */
6118 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6119 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
6120 				*error = ENOBUFS;
6121 				goto out_now;
6122 			}
6123 			added++;
6124 			break;
6125 #endif
6126 #ifdef INET6
6127 		case AF_INET6:
6128 			incr = sizeof(struct sockaddr_in6);
6129 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6130 				/* assoc gone no un-lock */
6131 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6132 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
6133 				*error = ENOBUFS;
6134 				goto out_now;
6135 			}
6136 			added++;
6137 			break;
6138 #endif
6139 		default:
6140 			break;
6141 		}
6142 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6143 	}
6144 out_now:
6145 	return (added);
6146 }
6147 
6148 struct sctp_tcb *
6149 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6150     int *totaddr, int *num_v4, int *num_v6, int *error,
6151     int limit, int *bad_addr)
6152 {
6153 	struct sockaddr *sa;
6154 	struct sctp_tcb *stcb = NULL;
6155 	size_t incr, at, i;
6156 
6157 	at = incr = 0;
6158 	sa = addr;
6159 
6160 	*error = *num_v6 = *num_v4 = 0;
6161 	/* account and validate addresses */
6162 	for (i = 0; i < (size_t)*totaddr; i++) {
6163 		switch (sa->sa_family) {
6164 #ifdef INET
6165 		case AF_INET:
6166 			(*num_v4) += 1;
6167 			incr = sizeof(struct sockaddr_in);
6168 			if (sa->sa_len != incr) {
6169 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6170 				*error = EINVAL;
6171 				*bad_addr = 1;
6172 				return (NULL);
6173 			}
6174 			break;
6175 #endif
6176 #ifdef INET6
6177 		case AF_INET6:
6178 			{
6179 				struct sockaddr_in6 *sin6;
6180 
6181 				sin6 = (struct sockaddr_in6 *)sa;
6182 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6183 					/* Must be non-mapped for connectx */
6184 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6185 					*error = EINVAL;
6186 					*bad_addr = 1;
6187 					return (NULL);
6188 				}
6189 				(*num_v6) += 1;
6190 				incr = sizeof(struct sockaddr_in6);
6191 				if (sa->sa_len != incr) {
6192 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6193 					*error = EINVAL;
6194 					*bad_addr = 1;
6195 					return (NULL);
6196 				}
6197 				break;
6198 			}
6199 #endif
6200 		default:
6201 			*totaddr = i;
6202 			/* we are done */
6203 			break;
6204 		}
6205 		if (i == (size_t)*totaddr) {
6206 			break;
6207 		}
6208 		SCTP_INP_INCR_REF(inp);
6209 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6210 		if (stcb != NULL) {
6211 			/* Already have or am bring up an association */
6212 			return (stcb);
6213 		} else {
6214 			SCTP_INP_DECR_REF(inp);
6215 		}
6216 		if ((at + incr) > (size_t)limit) {
6217 			*totaddr = i;
6218 			break;
6219 		}
6220 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6221 	}
6222 	return ((struct sctp_tcb *)NULL);
6223 }
6224 
6225 /*
6226  * sctp_bindx(ADD) for one address.
6227  * assumes all arguments are valid/checked by caller.
6228  */
/*
 * Bind one additional address to a subset-bound endpoint.
 *
 * Validates the sockaddr (length, family vs. socket type, v4-mapped
 * handling), then either performs the initial bind (if the endpoint is
 * still unbound) or adds the address via sctp_addr_mgmt_ep_sa().
 * Results are reported through *error (0 on success); the function
 * returns nothing.  "p" is the calling thread/proc, required only for
 * the initial-bind path.  assoc_id != 0 (association-level bindx) is
 * currently not implemented (see FIX below).
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Convert the v4-mapped v6 address to plain v4. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* Endpoint never bound: this is the initial bind. */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* Is another endpoint already bound to this addr:port? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Port cleared before the address-management call. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6352 
6353 /*
6354  * sctp_bindx(DELETE) for one address.
6355  * assumes all arguments are valid/checked by caller.
6356  */
/*
 * Remove one bound address from a subset-bound endpoint.
 *
 * Mirrors sctp_bindx_add_address(): validates the sockaddr (length,
 * family vs. socket type, v4-mapped conversion) and then removes the
 * address via sctp_addr_mgmt_ep_sa().  Results are reported through
 * *error (0 on success).  assoc_id != 0 (association-level bindx) is
 * currently not implemented (see FIX below).
 */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Convert the v4-mapped v6 address to plain v4. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6437 
6438 /*
6439  * returns the valid local address count for an assoc, taking into account
6440  * all scoping rules
6441  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/* Which address families may be used on this endpoint? */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			/* A non-v6only v6 socket may also use v4. */
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case: count only the explicitly bound
		 * addresses that are not restricted for this assoc
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6575 
6576 #if defined(SCTP_LOCAL_TRACE_BUF)
6577 
/*
 * Append an entry to the global SCTP trace ring buffer.
 *
 * Lock-free: a writer claims a slot by advancing the shared index with
 * a CAS loop, then fills the slot without further synchronization (a
 * concurrent reader could therefore observe a partially written slot).
 * "str" is unused in this build (SCTP_UNUSED); a..f are the six raw
 * parameter words stored with the entry.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* Atomically claim the next index; retry if another CPU raced us. */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* On wrap-around, the claimed slot is 0 (index restarts at 1). */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6603 
6604 #endif
6605 /* XXX: Remove the #ifdef after tunneling over IPv6 works also on FreeBSD. */
6606 #ifdef INET
6607 /* We will need to add support
6608  * to bind the ports and such here
6609  * so we can do UDP tunneling. In
6610  * the mean-time, we return error
6611  */
6612 #include <netinet/udp.h>
6613 #include <netinet/udp_var.h>
6614 #include <sys/proc.h>
6615 #ifdef INET6
6616 #include <netinet6/sctp6_var.h>
6617 #endif
6618 
6619 static void
6620 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6621 {
6622 	struct ip *iph;
6623 	struct mbuf *sp, *last;
6624 	struct udphdr *uhdr;
6625 	uint16_t port = 0;
6626 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6627 
6628 	/*
6629 	 * Split out the mbuf chain. Leave the IP header in m, place the
6630 	 * rest in the sp.
6631 	 */
6632 	if ((m->m_flags & M_PKTHDR) == 0) {
6633 		/* Can't handle one that is not a pkt hdr */
6634 		goto out;
6635 	}
6636 	/* pull the src port */
6637 	iph = mtod(m, struct ip *);
6638 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6639 
6640 	port = uhdr->uh_sport;
6641 	sp = m_split(m, off, M_DONTWAIT);
6642 	if (sp == NULL) {
6643 		/* Gak, drop packet, we can't do a split */
6644 		goto out;
6645 	}
6646 	if (sp->m_pkthdr.len < header_size) {
6647 		/* Gak, packet can't have an SCTP header in it - to small */
6648 		m_freem(sp);
6649 		goto out;
6650 	}
6651 	/* ok now pull up the UDP header and SCTP header together */
6652 	sp = m_pullup(sp, header_size);
6653 	if (sp == NULL) {
6654 		/* Gak pullup failed */
6655 		goto out;
6656 	}
6657 	/* trim out the UDP header */
6658 	m_adj(sp, sizeof(struct udphdr));
6659 
6660 	/* Now reconstruct the mbuf chain */
6661 	/* 1) find last one */
6662 	last = m;
6663 	while (last->m_next != NULL) {
6664 		last = last->m_next;
6665 	}
6666 	last->m_next = sp;
6667 	m->m_pkthdr.len += sp->m_pkthdr.len;
6668 	last = m;
6669 	while (last != NULL) {
6670 		last = last->m_next;
6671 	}
6672 	/* Now its ready for sctp_input or sctp6_input */
6673 	iph = mtod(m, struct ip *);
6674 	switch (iph->ip_v) {
6675 #ifdef INET
6676 	case IPVERSION:
6677 		{
6678 			uint16_t len;
6679 
6680 			/* its IPv4 */
6681 			len = SCTP_GET_IPV4_LENGTH(iph);
6682 			len -= sizeof(struct udphdr);
6683 			SCTP_GET_IPV4_LENGTH(iph) = len;
6684 			sctp_input_with_port(m, off, port);
6685 			break;
6686 		}
6687 #endif
6688 #ifdef INET6
6689 	case IPV6_VERSION >> 4:
6690 		{
6691 			/* its IPv6 - NOT supported */
6692 			goto out;
6693 			break;
6694 
6695 		}
6696 #endif
6697 	default:
6698 		{
6699 			m_freem(m);
6700 			break;
6701 		}
6702 	}
6703 	return;
6704 out:
6705 	m_freem(m);
6706 }
6707 
6708 void
6709 sctp_over_udp_stop(void)
6710 {
6711 	struct socket *sop;
6712 
6713 	/*
6714 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6715 	 * for writting!
6716 	 */
6717 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6718 		/* Nothing to do */
6719 		return;
6720 	}
6721 	sop = SCTP_BASE_INFO(udp_tun_socket);
6722 	soclose(sop);
6723 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6724 }
6725 
6726 int
6727 sctp_over_udp_start(void)
6728 {
6729 	uint16_t port;
6730 	int ret;
6731 	struct sockaddr_in sin;
6732 	struct socket *sop = NULL;
6733 	struct thread *th;
6734 	struct ucred *cred;
6735 
6736 	/*
6737 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6738 	 * for writting!
6739 	 */
6740 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
6741 	if (port == 0) {
6742 		/* Must have a port set */
6743 		return (EINVAL);
6744 	}
6745 	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
6746 		/* Already running -- must stop first */
6747 		return (EALREADY);
6748 	}
6749 	th = curthread;
6750 	cred = th->td_ucred;
6751 	if ((ret = socreate(PF_INET, &sop,
6752 	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
6753 		return (ret);
6754 	}
6755 	SCTP_BASE_INFO(udp_tun_socket) = sop;
6756 	/* call the special UDP hook */
6757 	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
6758 	if (ret) {
6759 		goto exit_stage_left;
6760 	}
6761 	/* Ok we have a socket, bind it to the port */
6762 	memset(&sin, 0, sizeof(sin));
6763 	sin.sin_len = sizeof(sin);
6764 	sin.sin_family = AF_INET;
6765 	sin.sin_port = htons(port);
6766 	ret = sobind(sop, (struct sockaddr *)&sin, th);
6767 	if (ret) {
6768 		/* Close up we cant get the port */
6769 exit_stage_left:
6770 		sctp_over_udp_stop();
6771 		return (ret);
6772 	}
6773 	/*
6774 	 * Ok we should now get UDP packets directly to our input routine
6775 	 * sctp_recv_upd_tunneled_packet().
6776 	 */
6777 	return (0);
6778 }
6779 
6780 #endif
6781